lws_mutex_refcount

This creates a "pthread mutex with a reference count" using gcc / clang
atomic intrinsics + pthreads. Both the pt and context locks are moved over
to it; pt already had reference counting, but it is new for the context lock.
parent 8869a6ce1d
commit b58fb2dae3
6 changed files with 102 additions and 57 deletions
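
Before the diff itself, a minimal sketch of how the new primitive behaves. This is illustrative only, not code from the commit: the demo_* names are hypothetical, and the struct and prototypes are copied from the patch below so the fragment compiles on its own; in-tree you would instead build against the patched headers with LWS_MAX_SMP > 1.

	#include <pthread.h>

	/* copied from the patch so this sketch is self-contained */
	struct lws_mutex_refcount {
		pthread_mutex_t lock;
		pthread_t lock_owner;
		const char *last_lock_reason;
		char lock_depth;
		char metadata;
	};

	void lws_mutex_refcount_init(struct lws_mutex_refcount *mr);
	void lws_mutex_refcount_destroy(struct lws_mutex_refcount *mr);
	void lws_mutex_refcount_lock(struct lws_mutex_refcount *mr,
				     const char *reason);
	void lws_mutex_refcount_unlock(struct lws_mutex_refcount *mr);

	static struct lws_mutex_refcount demo_mr; /* hypothetical demo object */

	static void
	demo_nested_lock(void)
	{
		lws_mutex_refcount_init(&demo_mr);

		/* first lock from this thread takes the underlying mutex */
		lws_mutex_refcount_lock(&demo_mr, "outer");

		/* relocking from the owning thread only bumps lock_depth,
		 * so a helper that also takes the lock cannot deadlock
		 * its caller */
		lws_mutex_refcount_lock(&demo_mr, "inner helper");
		lws_mutex_refcount_unlock(&demo_mr); /* depth 2 -> 1, held */

		lws_mutex_refcount_unlock(&demo_mr); /* depth 1 -> 0, released */
		lws_mutex_refcount_destroy(&demo_mr);
	}

The point of the owner check is that a thread already holding the lock can re-enter it freely; the reason string is purely diagnostic bookkeeping.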
@@ -996,7 +996,7 @@ lws_create_vhost(struct lws_context *context,
 		lwsl_err("%s: lws_context_init_client_ssl failed\n", __func__);
 		goto bail1;
 	}
-	lws_context_lock(context);
+	lws_context_lock(context, "create_vhost");
 	n = _lws_vhost_init_server(info, vh);
 	lws_context_unlock(context);
 	if (n < 0) {
@@ -1215,7 +1215,7 @@ lws_create_context(const struct lws_context_creation_info *info)
 #endif

 #if LWS_MAX_SMP > 1
-	pthread_mutex_init(&context->lock, NULL);
+	lws_mutex_refcount_init(&context->mr);
 #endif

 #if defined(LWS_WITH_ESP32)
@@ -1844,7 +1844,7 @@ lws_check_deferred_free(struct lws_context *context, int tsi, int force)
	 * because there is nothing left using the vhost to conflict.
	 */

-	lws_context_lock(context); /* ------------------- context { */
+	lws_context_lock(context, "check deferred free"); /* ------ context { */

 	lws_start_foreach_ll(struct lws_vhost *, v, context->vhost_list) {
 		if (v->being_destroyed
@@ -2034,7 +2034,7 @@ lws_context_destroy2(struct lws_context *context)
 	lws_check_deferred_free(context, 0, 1);

 #if LWS_MAX_SMP > 1
-	pthread_mutex_destroy(&context->lock);
+	lws_mutex_refcount_destroy(&context->mr);
 #endif

 	if (context->event_loop_ops->destroy_context2)
@@ -2888,6 +2888,65 @@ lws_strncpy(char *dest, const char *src, size_t size)
 	return dest;
 }

+#if LWS_MAX_SMP > 1
+
+void
+lws_mutex_refcount_init(struct lws_mutex_refcount *mr)
+{
+	pthread_mutex_init(&mr->lock, NULL);
+	mr->last_lock_reason = NULL;
+	mr->lock_depth = 0;
+	mr->metadata = 0;
+	mr->lock_owner = 0;
+}
+
+void
+lws_mutex_refcount_destroy(struct lws_mutex_refcount *mr)
+{
+	pthread_mutex_destroy(&mr->lock);
+}
+
+void
+lws_mutex_refcount_lock(struct lws_mutex_refcount *mr, const char *reason)
+{
+	/* if true, this sequence is atomic because our thread has the lock
+	 *
+	 * - if true, only guy who can race to make it untrue is our thread,
+	 *   and we are here.
+	 *
+	 * - if false, only guy who could race to make it true is our thread,
+	 *   and we are here
+	 *
+	 * - it can be false and change to a different tid that is also false
+	 */
+	if (mr->lock_owner == pthread_self()) {
+		/* atomic because we only change it if we own the lock */
+		mr->lock_depth++;
+		return;
+	}
+
+	pthread_mutex_lock(&mr->lock);
+	/* atomic because only we can have the lock */
+	mr->last_lock_reason = reason;
+	mr->lock_owner = pthread_self();
+	mr->lock_depth = 1;
+	//lwsl_notice("tid %d: lock %s\n", mr->tid, reason);
+}
+
+void
+lws_mutex_refcount_unlock(struct lws_mutex_refcount *mr)
+{
+	if (--mr->lock_depth)
+		/* atomic because only thread that has the lock can unlock */
+		return;
+
+	mr->last_lock_reason = "free";
+	mr->lock_owner = 0;
+	//lwsl_notice("tid %d: unlock %s\n", mr->tid, mr->last_lock_reason);
+	pthread_mutex_unlock(&mr->lock);
+}
+
+#endif /* SMP */
+
 LWS_VISIBLE LWS_EXTERN int
 lws_is_cgi(struct lws *wsi) {
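Two details of the implementation above are worth spelling out. Unlock pre-decrements, so "if (--mr->lock_depth)" returns early while nested holds remain and only releases the pthread mutex when the outermost hold (depth 1) is consumed. And the reason string is there for diagnosis: when another thread stalls waiting on the lock, the holder's purpose can be read out of the struct. A hedged sketch of that use, with a hypothetical demo_dump_mr helper that is not in the commit (the commented-out lwsl_notice() calls in the patch expose the same information):

	#include <stdio.h>

	/* hypothetical diagnostic helper: dumps the bookkeeping a held
	 * lws_mutex_refcount carries */
	static void
	demo_dump_mr(const struct lws_mutex_refcount *mr)
	{
		/* depth says how many nested lock calls the owner has made;
		 * the reason string says why the outermost one was taken */
		fprintf(stderr, "lock depth %d, taken for '%s'\n",
			mr->lock_depth,
			mr->last_lock_reason ? mr->last_lock_reason
					     : "(never locked)");
	}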
@@ -303,6 +303,28 @@ struct lws_foreign_thread_pollfd {
 	int _or;
 };

+#if LWS_MAX_SMP > 1
+
+struct lws_mutex_refcount {
+	pthread_mutex_t lock;
+	pthread_t lock_owner;
+	const char *last_lock_reason;
+	char lock_depth;
+	char metadata;
+};
+
+void
+lws_mutex_refcount_init(struct lws_mutex_refcount *mr);
+
+void
+lws_mutex_refcount_destroy(struct lws_mutex_refcount *mr);
+
+void
+lws_mutex_refcount_lock(struct lws_mutex_refcount *mr, const char *reason);
+
+void
+lws_mutex_refcount_unlock(struct lws_mutex_refcount *mr);
+#endif

 #define LWS_HRTIMER_NOWAIT (0x7fffffffffffffffll)
@@ -313,10 +335,8 @@ struct lws_foreign_thread_pollfd {

 struct lws_context_per_thread {
 #if LWS_MAX_SMP > 1
-	pthread_mutex_t lock;
 	pthread_mutex_t lock_stats;
-	pthread_t lock_owner;
-	const char *last_lock_reason;
+	struct lws_mutex_refcount mr;
 #endif

 	struct lws_context *context;
@@ -379,7 +399,6 @@ struct lws_context_per_thread {

 	unsigned char tid;

-	unsigned char lock_depth;
 	unsigned char inside_service:1;
 	unsigned char event_loop_foreign:1;
 	unsigned char event_loop_destroy_processing_done:1;
@@ -575,8 +594,7 @@ struct lws_context {
 	struct lws_context_per_thread pt[LWS_MAX_SMP];
 	struct lws_conn_stats conn_stats;
 #if LWS_MAX_SMP > 1
-	pthread_mutex_t lock;
-	int lock_depth;
+	struct lws_mutex_refcount mr;
 #endif
 #ifdef _WIN32
 /* different implementation between unix and windows */
@@ -1197,7 +1215,7 @@ LWS_EXTERN void lwsl_emit_stderr(int level, const char *line);
 static LWS_INLINE void
 lws_pt_mutex_init(struct lws_context_per_thread *pt)
 {
-	pthread_mutex_init(&pt->lock, NULL);
+	lws_mutex_refcount_init(&pt->mr);
 	pthread_mutex_init(&pt->lock_stats, NULL);
 }
@@ -1205,34 +1223,11 @@ static LWS_INLINE void
 lws_pt_mutex_destroy(struct lws_context_per_thread *pt)
 {
 	pthread_mutex_destroy(&pt->lock_stats);
-	pthread_mutex_destroy(&pt->lock);
+	lws_mutex_refcount_destroy(&pt->mr);
 }

-static LWS_INLINE void
-lws_pt_lock(struct lws_context_per_thread *pt, const char *reason)
-{
-	if (pt->lock_owner == pthread_self()) {
-		pt->lock_depth++;
-		return;
-	}
-	pthread_mutex_lock(&pt->lock);
-	pt->last_lock_reason = reason;
-	pt->lock_owner = pthread_self();
-	//lwsl_notice("tid %d: lock %s\n", pt->tid, reason);
-}
-
-static LWS_INLINE void
-lws_pt_unlock(struct lws_context_per_thread *pt)
-{
-	if (pt->lock_depth) {
-		pt->lock_depth--;
-		return;
-	}
-	pt->last_lock_reason = "free";
-	pt->lock_owner = 0;
-	//lwsl_notice("tid %d: unlock %s\n", pt->tid, pt->last_lock_reason);
-	pthread_mutex_unlock(&pt->lock);
-}
+#define lws_pt_lock(pt, reason) lws_mutex_refcount_lock(&pt->mr, reason)
+#define lws_pt_unlock(pt) lws_mutex_refcount_unlock(&pt->mr)

 static LWS_INLINE void
 lws_pt_stats_lock(struct lws_context_per_thread *pt)
@@ -1246,17 +1241,8 @@ lws_pt_stats_unlock(struct lws_context_per_thread *pt)
 	pthread_mutex_unlock(&pt->lock_stats);
 }

-static LWS_INLINE void
-lws_context_lock(struct lws_context *context)
-{
-	pthread_mutex_lock(&context->lock);
-}
-
-static LWS_INLINE void
-lws_context_unlock(struct lws_context *context)
-{
-	pthread_mutex_unlock(&context->lock);
-}
+#define lws_context_lock(c, reason) lws_mutex_refcount_lock(&c->mr, reason)
+#define lws_context_unlock(c) lws_mutex_refcount_unlock(&c->mr)

 static LWS_INLINE void
 lws_vhost_lock(struct lws_vhost *vhost)
@@ -1276,7 +1262,7 @@ lws_vhost_unlock(struct lws_vhost *vhost)
 #define lws_pt_mutex_destroy(_a) (void)(_a)
 #define lws_pt_lock(_a, b) (void)(_a)
 #define lws_pt_unlock(_a) (void)(_a)
-#define lws_context_lock(_a) (void)(_a)
+#define lws_context_lock(_a, _b) (void)(_a)
 #define lws_context_unlock(_a) (void)(_a)
 #define lws_vhost_lock(_a) (void)(_a)
 #define lws_vhost_unlock(_a) (void)(_a)
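The "(void)(_a)" stubs just above are what let every call site in this patch pass a reason string unconditionally: on LWS_MAX_SMP == 1 builds the two-argument macro swallows its arguments and emits no code, so callers need no #ifdef. A minimal sketch of that pattern, using a hypothetical DEMO_SMP switch and demo_* names rather than the real ones (the SMP branch mirrors the patch's shape and is not compiled here):

	#include <stdio.h>

	/* hypothetical stand-in for the LWS_MAX_SMP switch in the patch */
	#define DEMO_SMP 0

	#if DEMO_SMP > 1
	#define demo_lock(c, reason) real_lock_fn(&(c)->mr, reason)
	#define demo_unlock(c) real_unlock_fn(&(c)->mr)
	#else
	/* evaluates the argument (avoiding unused-variable warnings)
	 * but generates no code */
	#define demo_lock(_a, _b) (void)(_a)
	#define demo_unlock(_a) (void)(_a)
	#endif

	struct demo_ctx { int dummy; };

	int main(void)
	{
		struct demo_ctx ctx;

		/* identical call site on SMP and non-SMP builds */
		demo_lock(&ctx, "periodic checks");
		puts("critical section");
		demo_unlock(&ctx);

		return 0;
	}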
@@ -772,7 +772,7 @@ lws_service_periodic_checks(struct lws_context *context,
	 * interface missing before
	 */

-	lws_context_lock(context);
+	lws_context_lock(context, "periodic checks");
 	lws_start_foreach_llp(struct lws_vhost **, pv,
			      context->no_listener_vhost_list) {
 		struct lws_vhost *v = *pv;
@@ -95,7 +95,7 @@ lws_get_or_create_peer(struct lws_vhost *vhost, lws_sockfd_type sockfd)
 	hash = hash % context->pl_hash_elements;

-	lws_context_lock(context); /* <====================================== */
+	lws_context_lock(context, "peer search"); /* <======================= */

 	lws_start_foreach_ll(struct lws_peer *, peerx,
			     context->pl_hash_table[hash]) {
@@ -163,7 +163,7 @@ lws_peer_cull_peer_wait_list(struct lws_context *context)
 	if (context->next_cull && t < context->next_cull)
 		return;

-	lws_context_lock(context); /* <====================================== */
+	lws_context_lock(context, "peer cull"); /* <========================= */

 	context->next_cull = t + 5;
@@ -190,7 +190,7 @@ lws_peer_add_wsi(struct lws_context *context, struct lws_peer *peer,
 	if (!peer)
 		return;

-	lws_context_lock(context); /* <====================================== */
+	lws_context_lock(context, "peer add"); /* <========================== */

 	peer->count_wsi++;
 	wsi->peer = peer;
@@ -225,7 +225,7 @@ lws_peer_track_wsi_close(struct lws_context *context, struct lws_peer *peer)
 	if (!peer)
 		return;

-	lws_context_lock(context); /* <====================================== */
+	lws_context_lock(context, "peer wsi close"); /* <==================== */

 	assert(peer->count_wsi);
 	peer->count_wsi--;
@@ -273,7 +273,7 @@ lws_peer_track_ah_detach(struct lws_context *context, struct lws_peer *peer)
 	if (!peer)
 		return;

-	lws_context_lock(context); /* <====================================== */
+	lws_context_lock(context, "peer ah detach"); /* <==================== */
 	assert(peer->http.count_ah);
 	peer->http.count_ah--;
 	lws_context_unlock(context); /* ====================================> */
@@ -241,7 +241,7 @@ lws_header_table_attach(struct lws *wsi, int autoservice)
 	pt->http.ah_count_in_use++;

 #if defined(LWS_WITH_PEER_LIMITS) && (defined(LWS_ROLE_H1) || defined(LWS_ROLE_H2))
-	lws_context_lock(context); /* <====================================== */
+	lws_context_lock(context, "ah attach"); /* <========================= */
 	if (wsi->peer)
 		wsi->peer->http.count_ah++;
 	lws_context_unlock(context); /* ====================================> */
@@ -360,7 +360,7 @@ int __lws_header_table_detach(struct lws *wsi, int autoservice)
 	__lws_header_table_reset(wsi, autoservice);
 #if defined(LWS_WITH_PEER_LIMITS) && (defined(LWS_ROLE_H1) || defined(LWS_ROLE_H2))
-	lws_context_lock(context); /* <====================================== */
+	lws_context_lock(context, "ah detach"); /* <========================= */
 	if (wsi->peer)
 		wsi->peer->http.count_ah++;
 	lws_context_unlock(context); /* ====================================> */