From e94c31d03b0be3e3143febfa75c2ea718284b8f4 Mon Sep 17 00:00:00 2001 From: Stefan Lankes Date: Mon, 18 Apr 2011 15:12:18 +0200 Subject: [PATCH] add a new IRQ save spinlock implementation - avoids a deadlock - usable in an interrupt handler --- include/metalsvm/spinlock.h | 86 +++++++++++++++++++++---------- include/metalsvm/spinlock_types.h | 11 +++- include/metalsvm/stddef.h | 2 + kernel/tasks.c | 44 ++++++++++------ 4 files changed, 97 insertions(+), 46 deletions(-) diff --git a/include/metalsvm/spinlock.h b/include/metalsvm/spinlock.h index e6fcd15f..1d62c968 100644 --- a/include/metalsvm/spinlock.h +++ b/include/metalsvm/spinlock.h @@ -26,6 +26,7 @@ #include #include #include +#include #ifdef __cplusplus extern "C" { @@ -44,8 +45,12 @@ inline static int spinlock_init(spinlock_t* s) { } inline static int spinlock_destroy(spinlock_t* s) { + if (BUILTIN_EXPECT(!s, 0)) + return -EINVAL; + s->owner = MAX_TASKS; s->counter = 0; + return 0; } @@ -61,8 +66,9 @@ inline static int spinlock_lock(spinlock_t* s) { } ticket = atomic_int32_inc(&s->queue); - while(atomic_int32_read(&s->dequeue) != ticket) - ; + while(atomic_int32_read(&s->dequeue) != ticket) { + NOP1; + } s->owner = per_core(current_task)->id; s->counter = 1; @@ -82,44 +88,70 @@ inline static int spinlock_unlock(spinlock_t* s) { return 0; } -inline static int spinlock_lock_irqsave(spinlock_t* s) { - uint32_t flags; - int ret; - +inline static int spinlock_irqsave_init(spinlock_irqsave_t* s) { if (BUILTIN_EXPECT(!s, 0)) return -EINVAL; - - flags = irq_nested_disable(); - ret = spinlock_lock(s); - if (ret) { - irq_nested_enable(flags); - return ret; - } + atomic_int32_set(&s->queue, 0); + atomic_int32_set(&s->dequeue, 1); + s->flags = 0; + s->coreid = (uint32_t)-1; + s->counter = 0; - if (!ret && (s->counter == 1)) - s->flags = flags; - - return ret; + return 0; } -inline static int spinlock_unlock_irqsave(spinlock_t* s) { - int ret, restore = 0; - uint32_t flags = 0; +inline static int 
spinlock_irqsave_destroy(spinlock_irqsave_t* s) { + if (BUILTIN_EXPECT(!s, 0)) + return -EINVAL; + + s->flags = 0; + s->coreid = (uint32_t)-1; + s->counter = 0; + + return 0; +} + +inline static int spinlock_irqsave_lock(spinlock_irqsave_t* s) { + uint32_t flags; + int32_t ticket; if (BUILTIN_EXPECT(!s, 0)) return -EINVAL; - if (s->counter == 1) { - restore = 1; - flags = s->flags; + flags = irq_nested_disable(); + if (s->coreid == CORE_ID) { + s->counter++; + return 0; } - ret = spinlock_unlock(s); - if (!ret && restore) - irq_nested_enable(flags); + ticket = atomic_int32_inc(&s->queue); + while (atomic_int32_read(&s->dequeue) != ticket) + NOP1; + + s->coreid = CORE_ID; + s->flags = flags; + s->counter = 1; + + return 0; +} - return ret; +inline static int spinlock_irqsave_unlock(spinlock_irqsave_t* s) { + uint32_t flags; + + if (BUILTIN_EXPECT(!s, 0)) + return -EINVAL; + + s->counter--; + if (!s->counter) { + flags = s->flags; + s->coreid = (uint32_t) -1; + s->flags = 0; + atomic_int32_inc(&s->dequeue); + irq_nested_enable(flags); + } + + return 0; } #ifdef __cplusplus diff --git a/include/metalsvm/spinlock_types.h b/include/metalsvm/spinlock_types.h index bac2f5df..5105f4d2 100644 --- a/include/metalsvm/spinlock_types.h +++ b/include/metalsvm/spinlock_types.h @@ -31,10 +31,17 @@ typedef struct spinlock { atomic_int32_t queue, dequeue; tid_t owner; uint32_t counter; - uint32_t flags; } spinlock_t; -#define SPINLOCK_INIT { ATOMIC_INIT(0), ATOMIC_INIT(1), MAX_TASKS, 0, 0 } +typedef struct spinlock_irqsave { + atomic_int32_t queue, dequeue; + uint32_t flags; + uint32_t coreid; + uint32_t counter; +} spinlock_irqsave_t; + +#define SPINLOCK_INIT { ATOMIC_INIT(0), ATOMIC_INIT(1), MAX_TASKS, 0} +#define SPINLOCK_IRQSAVE_INIT { ATOMIC_INIT(0), ATOMIC_INIT(1), 0, (uint32_t)-1, 0} #ifdef __cplusplus } diff --git a/include/metalsvm/stddef.h b/include/metalsvm/stddef.h index 2dfc9ba6..07d3ed27 100644 --- a/include/metalsvm/stddef.h +++ b/include/metalsvm/stddef.h @@ -35,6 
+35,7 @@ typedef unsigned int tid_t; #define per_core(name) name #define DECLARE_PER_CORE(type, name) extern type name; #define DEFINE_PER_CORE(type, name, def_value) type name = def_value; +#define CORE_ID 0 #else #define per_core(name) name[LOGICAL_CPUID].var #define DECLARE_PER_CORE(type, name) \ @@ -42,6 +43,7 @@ typedef unsigned int tid_t; extern aligned_##name name[MAX_CORES]; #define DEFINE_PER_CORE(type, name, def_value) \ aligned_##name name[MAX_CORES] = {[0 ... MAX_CORES-1] = {def_value}}; +#define CORE_ID LOGICAL_CPUID #endif /* needed to find the task, which is currently running on this core */ diff --git a/kernel/tasks.c b/kernel/tasks.c index 3f7a1b7a..92e3ff2b 100644 --- a/kernel/tasks.c +++ b/kernel/tasks.c @@ -29,12 +29,13 @@ #include #include #include +#include #include DEFINE_PER_CORE(task_t*, current_task, NULL); static task_t task_table[MAX_TASKS] = {[0 ... MAX_TASKS-1] = {0, TASK_INVALID, ATOMIC_INIT(0), \ SPINLOCK_INIT, NULL, SPINLOCK_INIT, NULL}}; -static spinlock_t table_lock = SPINLOCK_INIT; +static spinlock_irqsave_t table_lock = SPINLOCK_IRQSAVE_INIT; /* * helper function for the assembly code to determine the current task @@ -63,7 +64,7 @@ static void wakeup_blocked_tasks(int result) wait_msg_t tmp = { per_core(current_task)->id, result }; unsigned int i; - spinlock_lock_irqsave(&table_lock); + spinlock_irqsave_lock(&table_lock); /* wake up blocked tasks */ for(i=0; ivma_lock); + spinlock_irqsave_lock(&table_lock); for(i=0; ivma_lock); child = &task_table[i].vma_list; parent = per_core(current_task)->vma_list; tmp = NULL; @@ -211,7 +212,6 @@ int sys_fork(void) tmp = *child; child = &((*child)->next); } - spinlock_unlock(&per_core(current_task)->vma_lock); mailbox_wait_msg_init(&task_table[i].inbox); memset(task_table[i].outbox, 0x00, sizeof(mailbox_wait_msg_t*)*MAX_TASKS); @@ -219,8 +219,13 @@ int sys_fork(void) ret = arch_fork(task_table+i); - if (parent_task != per_core(current_task)) - return 0; // Oh, the new child! 
=> leave function + if (parent_task != per_core(current_task)) { + // Oh, the current task is the new child task! + // Leave the function without releasing the locks + // because the locks are already released + // by the parent task! + return 0; + } if (!ret) { task_table[i].status = TASK_READY; @@ -231,7 +236,8 @@ int sys_fork(void) } create_task_out: - spinlock_unlock_irqsave(&table_lock); + spinlock_irqsave_unlock(&table_lock); + spinlock_unlock(&per_core(current_task)->vma_lock); return ret; } @@ -576,7 +582,7 @@ int wakeup_task(tid_t id) int ret = -EINVAL; /* avoid nested locking */ - spinlock_lock_irqsave(&table_lock); + spinlock_irqsave_lock(&table_lock); if (task_table[id].status != TASK_BLOCKED) { kprintf("Task %d is not blocked!\n", id); @@ -585,7 +591,7 @@ int wakeup_task(tid_t id) ret = 0; } - spinlock_unlock_irqsave(&table_lock); + spinlock_irqsave_unlock(&table_lock); return ret; } @@ -594,14 +600,14 @@ int block_task(tid_t id) { int ret = -EINVAL; - spinlock_lock_irqsave(&table_lock); + spinlock_irqsave_lock(&table_lock); if ((task_table[id].status == TASK_RUNNING) || (task_table[id].status == TASK_READY)) { task_table[id].status = TASK_BLOCKED; ret = 0; } else kprintf("Unable to block task %d!\n", id); - spinlock_unlock_irqsave(&table_lock); + spinlock_irqsave_unlock(&table_lock); return ret; } @@ -611,7 +617,9 @@ void scheduler(void) unsigned int i; unsigned int new_id; - spinlock_lock(&table_lock); +#if MAX_CORES > 1 + spinlock_irqsave_lock(&table_lock); +#endif /* signalize that this task could be reused */ if (per_core(current_task)->status == TASK_FINISHED) @@ -645,5 +653,7 @@ void scheduler(void) } get_task_out: - spinlock_unlock(&table_lock); +#if MAX_CORES > 1 + spinlock_irqsave_unlock(&table_lock); +#endif }