Keep CWC in a separate lock domain to avoid waiting for global_lock

Andreas Öman 2011-01-25 22:48:01 +01:00
parent a137baf845
commit 5b640b2ed0
8 changed files with 33 additions and 25 deletions
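In short: the generic config-table layer (dtable) gains a per-class mutex pointer, every existing table registers global_lock there, and the CWC table plus all CWC connection state move to a new cwc_mutex, so slow card-server I/O no longer blocks threads that only need global_lock. A minimal sketch of the idea, using simplified types rather than tvheadend's real ones:

#include <pthread.h>

pthread_mutex_t global_lock = PTHREAD_MUTEX_INITIALIZER;
pthread_mutex_t cwc_mutex   = PTHREAD_MUTEX_INITIALIZER;

/* Each table class names the mutex that guards its records. */
typedef struct dtable_class {
  const char *dtc_name;
  pthread_mutex_t *dtc_mutex;
} dtable_class_t;

static const dtable_class_t access_dtc = { "accesscontrol", &global_lock };
static const dtable_class_t cwc_dtc    = { "cwc",           &cwc_mutex   };

/* Generic table code locks whatever domain the class declares. */
static void table_op(const dtable_class_t *dtc)
{
  pthread_mutex_lock(dtc->dtc_mutex);
  /* ... read or mutate this table's records ... */
  pthread_mutex_unlock(dtc->dtc_mutex);
}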

View file

@@ -409,6 +409,7 @@ static const dtable_class_t access_dtc = {
.dtc_record_delete = access_record_delete,
.dtc_read_access = ACCESS_ADMIN,
.dtc_write_access = ACCESS_ADMIN,
+.dtc_mutex = &global_lock,
};
/**

View file

@@ -841,6 +841,7 @@ static const dtable_class_t capmt_dtc = {
.dtc_record_delete = capmt_entry_delete,
.dtc_read_access = ACCESS_ADMIN,
.dtc_write_access = ACCESS_ADMIN,
+.dtc_mutex = &global_lock,
};
/**

View file

@@ -858,6 +858,7 @@ static const dtable_class_t channel_tags_dtc = {
.dtc_record_delete = channel_tag_record_delete,
.dtc_read_access = ACCESS_ADMIN,
.dtc_write_access = ACCESS_ADMIN,
+.dtc_mutex = &global_lock,
};

View file

@@ -95,6 +95,7 @@ TAILQ_HEAD(cwc_message_queue, cwc_message);
LIST_HEAD(ecm_section_list, ecm_section);
static struct cwc_queue cwcs;
static pthread_cond_t cwc_config_changed;
+static pthread_mutex_t cwc_mutex;
static char *crypt_md5(const char *pw, const char *salt);
/**
@@ -202,7 +203,7 @@ typedef struct cwc {
int cwc_writer_running;
struct cwc_message_queue cwc_writeq;
-TAILQ_ENTRY(cwc) cwc_link; /* Linkage protected via global_lock */
+TAILQ_ENTRY(cwc) cwc_link; /* Linkage protected via cwc_mutex */
struct cwc_service_list cwc_services;
@@ -770,18 +771,16 @@ handle_ecm_reply(cwc_service_t *ct, ecm_section_t *es, uint8_t *msg,
"Obtained key for for service \"%s\" in %lld ms, from %s",
t->s_svcname, delay, ct->cs_cwc->cwc_hostname);
pthread_mutex_lock(&t->s_stream_mutex);
ct->cs_keystate = CS_RESOLVED;
memcpy(ct->cs_cw, msg + 3, 16);
ct->cs_pending_cw_update = 1;
pthread_mutex_unlock(&t->s_stream_mutex);
}
}
/**
* Handle running reply
-* global_lock is held
+* cwc_mutex is held
*/
static int
cwc_running_reply(cwc_t *cwc, uint8_t msgtype, uint8_t *msg, int len)
@@ -836,9 +835,9 @@ cwc_read(cwc_t *cwc, void *buf, size_t len, int timeout)
{
int r;
-pthread_mutex_unlock(&global_lock);
+pthread_mutex_unlock(&cwc_mutex);
r = tcp_read_timeout(cwc->cwc_fd, buf, len, timeout);
-pthread_mutex_lock(&global_lock);
+pthread_mutex_lock(&cwc_mutex);
if(cwc_must_break(cwc))
return ECONNABORTED;
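cwc_read shows the standard pattern this lock domain enables: drop the domain mutex around blocking network I/O, then revalidate state after reacquiring it. A minimal sketch of the pattern, with hypothetical stand-ins for tvheadend's cwc_t, tcp_read_timeout() and cwc_must_break():

#include <errno.h>
#include <pthread.h>
#include <stddef.h>

struct conn { int fd; int must_break; };          /* stand-in for cwc_t */
static pthread_mutex_t cwc_mutex = PTHREAD_MUTEX_INITIALIZER;
static int blocking_read(int fd, void *buf, size_t len);   /* elided */

static int read_with_lock_dropped(struct conn *c, void *buf, size_t len)
{
  int r;

  pthread_mutex_unlock(&cwc_mutex);    /* let other CWC work proceed  */
  r = blocking_read(c->fd, buf, len);  /* may sleep; lock is not held */
  pthread_mutex_lock(&cwc_mutex);

  if(c->must_break)                    /* world may have changed while */
    return ECONNABORTED;               /* we slept without the lock    */
  return r;
}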
@@ -1031,23 +1030,23 @@ cwc_thread(void *aux)
struct timespec ts;
int attempts = 0;
-pthread_mutex_lock(&global_lock);
+pthread_mutex_lock(&cwc_mutex);
while(cwc->cwc_running) {
while(cwc->cwc_running && cwc->cwc_enabled == 0)
-pthread_cond_wait(&cwc->cwc_cond, &global_lock);
+pthread_cond_wait(&cwc->cwc_cond, &cwc_mutex);
snprintf(hostname, sizeof(hostname), "%s", cwc->cwc_hostname);
port = cwc->cwc_port;
tvhlog(LOG_INFO, "cwc", "Attempting to connect to %s:%d", hostname, port);
-pthread_mutex_unlock(&global_lock);
+pthread_mutex_unlock(&cwc_mutex);
fd = tcp_connect(hostname, port, errbuf, sizeof(errbuf), 10);
-pthread_mutex_lock(&global_lock);
+pthread_mutex_lock(&cwc_mutex);
if(fd == -1) {
attempts++;
@@ -1092,7 +1091,7 @@ cwc_thread(void *aux)
"%s: Automatic connection attempt in in %d seconds",
cwc->cwc_hostname, d);
-pthread_cond_timedwait(&cwc_config_changed, &global_lock, &ts);
+pthread_cond_timedwait(&cwc_config_changed, &cwc_mutex, &ts);
}
tvhlog(LOG_INFO, "cwc", "%s destroyed", cwc->cwc_hostname);
@@ -1110,7 +1109,7 @@ cwc_thread(void *aux)
free((void *)cwc->cwc_hostname);
free(cwc);
-pthread_mutex_unlock(&global_lock);
+pthread_mutex_unlock(&cwc_mutex);
return NULL;
}
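The reconnect back-off in cwc_thread relies on pthread_cond_timedwait taking an absolute deadline: a timeout just retries the connection, while a signal on cwc_config_changed wakes the thread early. A minimal sketch of that idiom (hypothetical helper; the real code computes the deadline inline):

#include <pthread.h>
#include <time.h>

static pthread_mutex_t cwc_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cwc_config_changed = PTHREAD_COND_INITIALIZER;

/* Sleep up to `delay` seconds, but wake immediately if the
 * configuration-changed condition is signalled. */
static void backoff_wait(int delay)
{
  struct timespec ts;

  clock_gettime(CLOCK_REALTIME, &ts);  /* condvars use CLOCK_REALTIME   */
  ts.tv_sec += delay;                  /* absolute deadline, not a span */

  pthread_mutex_lock(&cwc_mutex);
  /* Returns 0 when signalled, ETIMEDOUT when the deadline passes. */
  pthread_cond_timedwait(&cwc_config_changed, &cwc_mutex, &ts);
  pthread_mutex_unlock(&cwc_mutex);
}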
@@ -1164,7 +1163,7 @@ cwc_emm(uint8_t *data, int len)
{
cwc_t *cwc;
-lock_assert(&global_lock);
+pthread_mutex_lock(&cwc_mutex);
TAILQ_FOREACH(cwc, &cwcs, cwc_link) {
if(cwc->cwc_forward_emm && cwc->cwc_writer_running) {
@@ -1189,6 +1188,7 @@ cwc_emm(uint8_t *data, int len)
}
}
}
+pthread_mutex_unlock(&cwc_mutex);
}
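cwc_emm shows the caller side of the new domain: code outside the CWC threads that walks the connection list must now take cwc_mutex explicitly instead of merely asserting global_lock. A minimal sketch of guarded traversal, with a pared-down list in the same BSD TAILQ style the file uses:

#include <pthread.h>
#include <sys/queue.h>

struct cwc { int forward_emm; TAILQ_ENTRY(cwc) link; };  /* pared down */
static TAILQ_HEAD(, cwc) cwcs = TAILQ_HEAD_INITIALIZER(cwcs);
static pthread_mutex_t cwc_mutex = PTHREAD_MUTEX_INITIALIZER;

static void forward_emm(const unsigned char *data, int len)
{
  struct cwc *c;
  (void)data; (void)len;            /* payload handling elided */

  pthread_mutex_lock(&cwc_mutex);   /* list may mutate under us otherwise */
  TAILQ_FOREACH(c, &cwcs, link) {
    if(c->forward_emm) {
      /* ... queue the EMM on this connection's write queue ... */
    }
  }
  pthread_mutex_unlock(&cwc_mutex);
}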
@@ -1447,7 +1447,7 @@ cwc_emm_viaccess(cwc_t *cwc, uint8_t *data, int mlen)
}
/**
*
* t->s_stream_mutex is held
*/
static void
cwc_table_input(struct th_descrambler *td, struct service *t,
@@ -1667,7 +1667,7 @@ cwc_descramble(th_descrambler_t *td, service_t *t, struct elementary_stream *st,
}
/**
-* global_lock is held
+* cwc_mutex is held
* s_stream_mutex is held
*/
static void
@@ -1715,7 +1715,7 @@ cwc_find_stream_by_caid(service_t *t, int caid)
/**
* Check if our CAID's matches, and if so, link
*
-* global_lock is held
+* global_lock is held. Not that we care about that, but either way, it is.
*/
void
cwc_service_start(service_t *t)
@@ -1724,7 +1724,7 @@ cwc_service_start(service_t *t)
cwc_service_t *ct;
th_descrambler_t *td;
-lock_assert(&global_lock);
+pthread_mutex_lock(&cwc_mutex);
TAILQ_FOREACH(cwc, &cwcs, cwc_link) {
if(cwc->cwc_caid == 0)
continue;
@@ -1753,6 +1753,7 @@ cwc_service_start(service_t *t)
service_nicename(t), cwc->cwc_hostname, cwc->cwc_port);
}
+pthread_mutex_unlock(&cwc_mutex);
}
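Note the nesting here: cwc_service_start is entered with global_lock held (see the comment above cwc_find_stream_by_caid) and then takes cwc_mutex, which fixes the lock order global_lock -> cwc_mutex. The CWC thread takes only cwc_mutex, so it can never invert that order. A small illustration of the rule, with hypothetical function names:

#include <pthread.h>

static pthread_mutex_t global_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t cwc_mutex   = PTHREAD_MUTEX_INITIALIZER;

static void service_start_path(void)  /* caller already holds global_lock */
{
  pthread_mutex_lock(&cwc_mutex);     /* inner lock: consistent order     */
  /* ... link the service to a CWC connection ... */
  pthread_mutex_unlock(&cwc_mutex);
}

static void cwc_thread_path(void)
{
  pthread_mutex_lock(&cwc_mutex);
  /* Must NOT take global_lock here while holding cwc_mutex, or this
   * could deadlock against service_start_path(). */
  pthread_mutex_unlock(&cwc_mutex);
}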
@@ -1762,10 +1763,11 @@ cwc_service_start(service_t *t)
static void
cwc_destroy(cwc_t *cwc)
{
-lock_assert(&global_lock);
+pthread_mutex_lock(&cwc_mutex);
TAILQ_REMOVE(&cwcs, cwc, cwc_link);
cwc->cwc_running = 0;
pthread_cond_signal(&cwc->cwc_cond);
+pthread_mutex_unlock(&cwc_mutex);
}
@@ -1890,8 +1892,6 @@ cwc_entry_update(void *opaque, const char *id, htsmsg_t *values, int maycreate)
if((cwc = cwc_entry_find(id, maycreate)) == NULL)
return NULL;
-lock_assert(&global_lock);
if((s = htsmsg_get_str(values, "username")) != NULL) {
free(cwc->cwc_username);
cwc->cwc_username = strdup(s);
@@ -2025,6 +2025,7 @@ static const dtable_class_t cwc_dtc = {
.dtc_record_delete = cwc_entry_delete,
.dtc_read_access = ACCESS_ADMIN,
.dtc_write_access = ACCESS_ADMIN,
+.dtc_mutex = &cwc_mutex,
};
@@ -2038,7 +2039,7 @@ cwc_init(void)
dtable_t *dt;
TAILQ_INIT(&cwcs);
+pthread_mutex_init(&cwc_mutex, NULL);
pthread_cond_init(&cwc_config_changed, NULL);
dt = dtable_create(&cwc_dtc, "cwc", NULL);

View file

@@ -40,6 +40,8 @@ typedef struct dtable_class {
int dtc_read_access;
int dtc_write_access;
+pthread_mutex_t *dtc_mutex;
} dtable_class_t;

View file

@@ -218,6 +218,7 @@ static const dtable_class_t satconf_dtc = {
.dtc_record_delete = satconf_entry_delete,
.dtc_read_access = ACCESS_ADMIN,
.dtc_write_access = ACCESS_ADMIN,
+.dtc_mutex = &global_lock,
};

View file

@@ -403,6 +403,7 @@ static const dtable_class_t autorec_dtc = {
.dtc_record_delete = autorec_record_delete,
.dtc_read_access = ACCESS_RECORDER,
.dtc_write_access = ACCESS_RECORDER,
+.dtc_mutex = &global_lock,
};
/**

View file

@@ -230,7 +230,7 @@ extjs_tablemgr(http_connection_t *hc, const char *remain, void *opaque)
in = entries != NULL ? htsmsg_json_deserialize(entries) : NULL;
-pthread_mutex_lock(&global_lock);
+pthread_mutex_lock(dt->dt_dtc->dtc_mutex);
if(!strcmp(op, "create")) {
if(http_access_verify(hc, dt->dt_dtc->dtc_write_access))
@@ -264,15 +264,15 @@ extjs_tablemgr(http_connection_t *hc, const char *remain, void *opaque)
} else {
bad:
-pthread_mutex_unlock(&global_lock);
+pthread_mutex_unlock(dt->dt_dtc->dtc_mutex);
return HTTP_STATUS_BAD_REQUEST;
noaccess:
-pthread_mutex_unlock(&global_lock);
+pthread_mutex_unlock(dt->dt_dtc->dtc_mutex);
return HTTP_STATUS_BAD_REQUEST;
}
-pthread_mutex_unlock(&global_lock);
+pthread_mutex_unlock(dt->dt_dtc->dtc_mutex);
if(in != NULL)
htsmsg_destroy(in);
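With the handler locking whatever mutex the table class declares, the same table-manager endpoint now serves both the global_lock tables and the cwc_mutex table. A minimal sketch of that dispatch, with hypothetical pared-down types:

#include <pthread.h>

typedef struct dtable_class { pthread_mutex_t *dtc_mutex; } dtable_class_t;
typedef struct dtable { const dtable_class_t *dt_dtc; } dtable_t;

static int tablemgr_op(dtable_t *dt)
{
  pthread_mutex_lock(dt->dt_dtc->dtc_mutex);   /* table's own domain */
  /* ... create / update / delete records for this table ... */
  pthread_mutex_unlock(dt->dt_dtc->dtc_mutex);
  return 0;
}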