add a new IRQ-save spinlock implementation

- avoids a deadlock
- usable in an interrupt handler (see the usage sketch below)
Stefan Lankes 2011-04-18 15:12:18 +02:00
parent 68281c8ad0
commit e94c31d03b
4 changed files with 97 additions and 46 deletions
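Below is a minimal usage sketch of the new API (not part of this commit; the names demo_lock, demo_update and demo_irq_handler are hypothetical). It illustrates the second bullet point: the lock keeps interrupts disabled for as long as it is held, and ownership is tracked per core via CORE_ID rather than per task, so it can also be taken from an interrupt handler, where the notion of a current task is unreliable.

	#include <metalsvm/spinlock.h>

	static spinlock_irqsave_t demo_lock = SPINLOCK_IRQSAVE_INIT;
	static uint32_t shared_state = 0;

	/* task context */
	void demo_update(void)
	{
		spinlock_irqsave_lock(&demo_lock);	/* IRQs off, ticket taken */
		shared_state++;
		spinlock_irqsave_unlock(&demo_lock);	/* previous IRQ state restored */
	}

	/* interrupt context: safe, because ownership is keyed on CORE_ID,
	 * not on per_core(current_task)->id as in the plain spinlock_t */
	void demo_irq_handler(void)
	{
		spinlock_irqsave_lock(&demo_lock);
		shared_state = 0;
		spinlock_irqsave_unlock(&demo_lock);
	}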


@@ -26,6 +26,7 @@
 #include <metalsvm/errno.h>
 #include <asm/irqflags.h>
 #include <asm/atomic.h>
+#include <asm/processor.h>
 
 #ifdef __cplusplus
 extern "C" {
@@ -44,8 +45,12 @@ inline static int spinlock_init(spinlock_t* s) {
 }
 
 inline static int spinlock_destroy(spinlock_t* s) {
 	if (BUILTIN_EXPECT(!s, 0))
 		return -EINVAL;
 
+	s->owner = MAX_TASKS;
+	s->counter = 0;
+
 	return 0;
 }
@@ -61,8 +66,9 @@ inline static int spinlock_lock(spinlock_t* s) {
 	}
 
 	ticket = atomic_int32_inc(&s->queue);
-	while(atomic_int32_read(&s->dequeue) != ticket)
-		;
+	while(atomic_int32_read(&s->dequeue) != ticket) {
+		NOP1;
+	}
 	s->owner = per_core(current_task)->id;
 	s->counter = 1;
@@ -82,44 +88,70 @@ inline static int spinlock_unlock(spinlock_t* s) {
 	return 0;
 }
 
-inline static int spinlock_lock_irqsave(spinlock_t* s) {
-	uint32_t flags;
-	int ret;
-
-	if (BUILTIN_EXPECT(!s, 0))
-		return -EINVAL;
-
-	flags = irq_nested_disable();
-	ret = spinlock_lock(s);
-	if (ret) {
-		irq_nested_enable(flags);
-		return ret;
-	}
-
-	if (!ret && (s->counter == 1))
-		s->flags = flags;
-
-	return ret;
-}
-
-inline static int spinlock_unlock_irqsave(spinlock_t* s) {
-	int ret, restore = 0;
-	uint32_t flags = 0;
-
-	if (BUILTIN_EXPECT(!s, 0))
-		return -EINVAL;
-
-	if (s->counter == 1) {
-		restore = 1;
-		flags = s->flags;
-	}
-	ret = spinlock_unlock(s);
-	if (!ret && restore)
-		irq_nested_enable(flags);
-
-	return ret;
-}
+inline static int spinlock_irqsave_init(spinlock_irqsave_t* s) {
+	if (BUILTIN_EXPECT(!s, 0))
+		return -EINVAL;
+
+	atomic_int32_set(&s->queue, 0);
+	atomic_int32_set(&s->dequeue, 1);
+	s->flags = 0;
+	s->coreid = (uint32_t)-1;
+	s->counter = 0;
+
+	return 0;
+}
+
+inline static int spinlock_irqsave_destroy(spinlock_irqsave_t* s) {
+	if (BUILTIN_EXPECT(!s, 0))
+		return -EINVAL;
+
+	s->flags = 0;
+	s->coreid = (uint32_t)-1;
+	s->counter = 0;
+
+	return 0;
+}
+
+inline static int spinlock_irqsave_lock(spinlock_irqsave_t* s) {
+	uint32_t flags;
+	int32_t ticket;
+
+	if (BUILTIN_EXPECT(!s, 0))
+		return -EINVAL;
+
+	flags = irq_nested_disable();
+	if (s->coreid == CORE_ID) {
+		s->counter++;
+		return 0;
+	}
+
+	ticket = atomic_int32_inc(&s->queue);
+	while (atomic_int32_read(&s->dequeue) != ticket)
+		NOP1;
+	s->coreid = CORE_ID;
+	s->flags = flags;
+	s->counter = 1;
+
+	return 0;
+}
+
+inline static int spinlock_irqsave_unlock(spinlock_irqsave_t* s) {
+	uint32_t flags;
+
+	if (BUILTIN_EXPECT(!s, 0))
+		return -EINVAL;
+
+	s->counter--;
+	if (!s->counter) {
+		flags = s->flags;
+		s->coreid = (uint32_t) -1;
+		s->flags = 0;
+		atomic_int32_inc(&s->dequeue);
+		irq_nested_enable(flags);
+	}
+
+	return 0;
+}
 
 #ifdef __cplusplus
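Since the whole critical section runs with interrupts disabled and ownership is tracked via coreid, a nested acquisition on the same core only increments the counter instead of drawing a second ticket; only the final unlock releases the ticket and restores the saved flags. A small sketch of this nesting behavior (nest_lock, outer and inner are hypothetical names):

	static spinlock_irqsave_t nest_lock = SPINLOCK_IRQSAVE_INIT;

	static void inner(void)
	{
		spinlock_irqsave_lock(&nest_lock);	/* coreid matches: counter 1 -> 2 */
		/* ... */
		spinlock_irqsave_unlock(&nest_lock);	/* counter 2 -> 1, IRQs stay off */
	}

	static void outer(void)
	{
		spinlock_irqsave_lock(&nest_lock);	/* ticket drawn, IRQs off, counter = 1 */
		inner();				/* nesting: no second ticket, no deadlock */
		spinlock_irqsave_unlock(&nest_lock);	/* counter = 0: dequeue bumped, IRQs restored */
	}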


@@ -31,10 +31,17 @@ typedef struct spinlock {
 	atomic_int32_t queue, dequeue;
 	tid_t owner;
 	uint32_t counter;
-	uint32_t flags;
 } spinlock_t;
 
-#define SPINLOCK_INIT { ATOMIC_INIT(0), ATOMIC_INIT(1), MAX_TASKS, 0, 0 }
+typedef struct spinlock_irqsave {
+	atomic_int32_t queue, dequeue;
+	uint32_t flags;
+	uint32_t coreid;
+	uint32_t counter;
+} spinlock_irqsave_t;
+
+#define SPINLOCK_INIT { ATOMIC_INIT(0), ATOMIC_INIT(1), MAX_TASKS, 0}
+#define SPINLOCK_IRQSAVE_INIT { ATOMIC_INIT(0), ATOMIC_INIT(1), 0, (uint32_t)-1, 0}
 
 #ifdef __cplusplus
 }
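The positional initializers above are easy to misread; here is a designated-initializer sketch of SPINLOCK_IRQSAVE_INIT showing the field mapping (the comments are inferred from the lock code, e.g. dequeue starting at 1 implies that atomic_int32_inc returns the incremented value):

	spinlock_irqsave_t lock = {
		.queue   = ATOMIC_INIT(0),	/* last ticket handed out */
		.dequeue = ATOMIC_INIT(1),	/* ticket allowed to enter next */
		.flags   = 0,			/* interrupt flags saved by the holder */
		.coreid  = (uint32_t)-1,	/* (uint32_t)-1 marks the lock as unowned */
		.counter = 0,			/* per-core recursion depth */
	};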


@@ -35,6 +35,7 @@ typedef unsigned int tid_t;
 #define per_core(name) name
 #define DECLARE_PER_CORE(type, name) extern type name;
 #define DEFINE_PER_CORE(type, name, def_value) type name = def_value;
+#define CORE_ID 0
 #else
 #define per_core(name) name[LOGICAL_CPUID].var
 #define DECLARE_PER_CORE(type, name) \
@@ -42,6 +43,7 @@ typedef unsigned int tid_t;
 	extern aligned_##name name[MAX_CORES];
 #define DEFINE_PER_CORE(type, name, def_value) \
 	aligned_##name name[MAX_CORES] = {[0 ... MAX_CORES-1] = {def_value}};
+#define CORE_ID LOGICAL_CPUID
 #endif
 
 /* needed to find the task, which is currently running on this core */


@@ -29,12 +29,13 @@
 #include <metalsvm/mailbox.h>
 #include <metalsvm/syscall.h>
 #include <metalsvm/fs.h>
+#include <asm/apic.h>
 #include <asm/elf.h>
 
 DEFINE_PER_CORE(task_t*, current_task, NULL);
 static task_t task_table[MAX_TASKS] = {[0 ... MAX_TASKS-1] = {0, TASK_INVALID, ATOMIC_INIT(0), \
 		SPINLOCK_INIT, NULL, SPINLOCK_INIT, NULL}};
-static spinlock_t table_lock = SPINLOCK_INIT;
+static spinlock_irqsave_t table_lock = SPINLOCK_IRQSAVE_INIT;
 
 /*
  * helper function for the assembly code to determine the current task
@@ -63,7 +64,7 @@ static void wakeup_blocked_tasks(int result)
 	wait_msg_t tmp = { per_core(current_task)->id, result };
 	unsigned int i;
 
-	spinlock_lock_irqsave(&table_lock);
+	spinlock_irqsave_lock(&table_lock);
 
 	/* wake up blocked tasks */
 	for(i=0; i<MAX_TASKS; i++) {
@@ -73,7 +74,7 @@ static void wakeup_blocked_tasks(int result)
 		}
 	}
 
-	spinlock_unlock_irqsave(&table_lock);
+	spinlock_irqsave_unlock(&table_lock);
 }
 
 static void NORETURN do_exit(int arg) {
@@ -131,7 +132,7 @@ static int create_task(tid_t* id, entry_point_t ep, void* arg)
 	if (BUILTIN_EXPECT(!ep, 0))
 		return -EINVAL;
 
-	spinlock_lock_irqsave(&table_lock);
+	spinlock_irqsave_lock(&table_lock);
 
 	for(i=0; i<MAX_TASKS; i++) {
 		if (task_table[i].status == TASK_INVALID) {
@@ -161,7 +162,7 @@ static int create_task(tid_t* id, entry_point_t ep, void* arg)
 	}
 
 create_task_out:
-	spinlock_unlock_irqsave(&table_lock);
+	spinlock_irqsave_unlock(&table_lock);
 
 	return ret;
 }
@@ -175,7 +176,8 @@ int sys_fork(void)
 	vma_t* parent;
 	vma_t* tmp;
 
-	spinlock_lock_irqsave(&table_lock);
+	spinlock_lock(&per_core(current_task)->vma_lock);
+	spinlock_irqsave_lock(&table_lock);
 
 	for(i=0; i<MAX_TASKS; i++) {
 		if (task_table[i].status == TASK_INVALID) {
@@ -191,7 +193,6 @@ int sys_fork(void)
 			spinlock_init(&task_table[i].vma_lock);
 
 			// copy VMA list
-			spinlock_lock(&per_core(current_task)->vma_lock);
 			child = &task_table[i].vma_list;
 			parent = per_core(current_task)->vma_list;
 			tmp = NULL;
@@ -211,7 +212,6 @@ int sys_fork(void)
 				tmp = *child;
 				child = &((*child)->next);
 			}
-			spinlock_unlock(&per_core(current_task)->vma_lock);
 
 			mailbox_wait_msg_init(&task_table[i].inbox);
 			memset(task_table[i].outbox, 0x00, sizeof(mailbox_wait_msg_t*)*MAX_TASKS);
@@ -219,8 +219,13 @@ int sys_fork(void)
 			ret = arch_fork(task_table+i);
 
-			if (parent_task != per_core(current_task))
-				return 0; // Oh, the new child! => leave function
+			if (parent_task != per_core(current_task)) {
+				// Oh, the current task is the new child task!
+				// Leave the function without releasing the locks,
+				// because they have already been released
+				// by the parent task!
+				return 0;
+			}
 
 			if (!ret) {
 				task_table[i].status = TASK_READY;
@@ -231,7 +236,8 @@ int sys_fork(void)
 	}
 
 create_task_out:
-	spinlock_unlock_irqsave(&table_lock);
+	spinlock_irqsave_unlock(&table_lock);
+	spinlock_unlock(&per_core(current_task)->vma_lock);
 
 	return ret;
 }
@@ -576,7 +582,7 @@ int wakeup_task(tid_t id)
 	int ret = -EINVAL;
 
 	/* avoid nested locking */
-	spinlock_lock_irqsave(&table_lock);
+	spinlock_irqsave_lock(&table_lock);
 
 	if (task_table[id].status != TASK_BLOCKED) {
 		kprintf("Task %d is not blocked!\n", id);
@@ -585,7 +591,7 @@ int wakeup_task(tid_t id)
 		ret = 0;
 	}
 
-	spinlock_unlock_irqsave(&table_lock);
+	spinlock_irqsave_unlock(&table_lock);
 
 	return ret;
 }
@@ -594,14 +600,14 @@ int block_task(tid_t id)
 {
 	int ret = -EINVAL;
 
-	spinlock_lock_irqsave(&table_lock);
+	spinlock_irqsave_lock(&table_lock);
 
 	if ((task_table[id].status == TASK_RUNNING) || (task_table[id].status == TASK_READY)) {
 		task_table[id].status = TASK_BLOCKED;
 		ret = 0;
 	} else kprintf("Unable to block task %d!\n", id);
 
-	spinlock_unlock_irqsave(&table_lock);
+	spinlock_irqsave_unlock(&table_lock);
 
 	return ret;
 }
@@ -611,7 +617,9 @@ void scheduler(void)
 	unsigned int i;
 	unsigned int new_id;
 
-	spinlock_lock(&table_lock);
+#if MAX_CORES > 1
+	spinlock_irqsave_lock(&table_lock);
+#endif
 
 	/* signalize that this task could be reused */
 	if (per_core(current_task)->status == TASK_FINISHED)
@@ -645,5 +653,7 @@ void scheduler(void)
 	}
 
 get_task_out:
-	spinlock_unlock(&table_lock);
+#if MAX_CORES > 1
+	spinlock_irqsave_unlock(&table_lock);
+#endif
 }
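A note on the #if MAX_CORES > 1 guard in scheduler(): on a uniprocessor build no other core can touch task_table, and the scheduler presumably runs with interrupts already disabled, so the lock is compiled out entirely. A sketch of the same guarded pattern (the helper name table_scan is hypothetical):

	static void table_scan(void)
	{
	#if MAX_CORES > 1
		/* another core may access task_table concurrently: lock required */
		spinlock_irqsave_lock(&table_lock);
	#endif
		/* ... walk task_table; with MAX_CORES == 1 this is assumed to
		 * run with IRQs already off, so no lock is needed ... */
	#if MAX_CORES > 1
		spinlock_irqsave_unlock(&table_lock);
	#endif
	}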