- add the function spinlock_has_lock to avoid nested locking

git-svn-id: http://svn.lfbs.rwth-aachen.de/svn/scc/trunk/MetalSVM@52 315a16e6-25f9-4109-90ae-ca3045a26c18
This commit is contained in:
stefan 2010-08-09 17:31:11 +00:00
parent 311b32aef8
commit ea6658a2ab
4 changed files with 55 additions and 5 deletions

View file

@ -73,7 +73,7 @@ next_try:
} else {
s->queue[s->pos] = current_task->id;
s->pos = (s->pos + 1) % MAX_TASKS;
current_task->status = TASK_BLOCKED;
block_task(current_task->id);
spinlock_unlock(&s->lock);
reschedule();
goto next_try;

View file

@ -31,9 +31,10 @@ extern "C" {
/* Ticket spinlock. `queue` hands out tickets and `dequeue` is the ticket
 * currently being served; `owner` records which task holds the lock
 * (MAX_TASKS when the lock is free) so nested locking can be detected. */
typedef struct {
	atomic_int32_t queue, dequeue;	/* ticket counters of the ticket lock */
	tid_t owner;			/* id of the lock holder; MAX_TASKS if unlocked */
} spinlock_t;
/* Static initializer: no tickets issued, ticket 1 served next,
 * no owner (MAX_TASKS marks the lock as free). */
#define SPINLOCK_INIT { ATOMIC_INIT(0), ATOMIC_INIT(1), MAX_TASKS }
/* Initialize a spinlock to the unlocked state.
 * Returns 0 on success, -1 if s is NULL. */
inline static int spinlock_init(spinlock_t* s) {
	if (BUILTIN_EXPECT(!s, 0))
		return -1;	/* bail out instead of dereferencing NULL */

	atomic_int32_set(&s->queue, 0);
	atomic_int32_set(&s->dequeue, 1);
	s->owner = MAX_TASKS;	/* no task holds a freshly initialized lock */

	return 0;
}
/* Destroy a spinlock: reset the owner so the lock reads as free.
 * Returns 0 on success, -1 if s is NULL (guard added for consistency
 * with spinlock_init/lock/unlock, which all reject NULL). */
inline static int spinlock_destroy(spinlock_t* s) {
	if (BUILTIN_EXPECT(!s, 0))
		return -1;

	s->owner = MAX_TASKS;

	return 0;
}
/* Acquire the ticket lock: draw one ticket, spin (yielding the CPU via
 * reschedule) until that ticket is served, then record the calling task
 * as owner so spinlock_has_lock can detect nested locking.
 * Returns 0 on success, -1 if s is NULL. */
inline static int spinlock_lock(spinlock_t* s) {
	int32_t ticket;

	if (BUILTIN_EXPECT(!s, 0))
		return -1;

	/* exactly one increment per acquisition — a second inc would
	 * consume an extra ticket and deadlock the lock */
	ticket = atomic_int32_inc(&s->queue);
	while (atomic_int32_read(&s->dequeue) != ticket)
		reschedule();
	s->owner = current_task->id;

	return 0;
}
@ -63,6 +70,7 @@ inline static int spinlock_unlock(spinlock_t* s) {
if (BUILTIN_EXPECT(!s, 0))
return -1;
s->owner = MAX_TASKS;
atomic_int32_inc(&s->dequeue);
return 0;
@ -88,6 +96,10 @@ inline static int spinlock_unlock_irqsave(spinlock_t* s) {
return ret;
}
/* Check whether the calling task currently owns the lock (used to avoid
 * nested locking). Returns non-zero if current_task holds s, 0 otherwise
 * — including when s is NULL (guard added; every sibling rejects NULL). */
inline static int spinlock_has_lock(spinlock_t* s) {
	if (BUILTIN_EXPECT(!s, 0))
		return 0;

	return (s->owner == current_task->id);
}
#ifdef __cplusplus
}
#endif

View file

@ -52,6 +52,9 @@ void scheduler(void);
*/
int wakeup_task(tid_t);
/* Change the status of the task with the given id to TASK_BLOCKED.
 * Returns 0 on success, -1 if the task was not TASK_RUNNING/TASK_READY. */
int block_task(tid_t);

/* Abort the current task; never returns. */
void NORETURN abort(void);

View file

@ -208,9 +208,44 @@ join_out:
/* Wake up a blocked task: move its status from TASK_BLOCKED to TASK_READY.
 * Takes table_lock only if the calling task does not already hold it,
 * so the function is safe to call from code that has locked the table.
 * Returns 0 on success, -1 if the task was not blocked. */
int wakeup_task(tid_t id)
{
	int ret = -1;
	int need_lock = !spinlock_has_lock(&table_lock);

	/* avoid nested locking */
	if (need_lock)
		spinlock_lock_irqsave(&table_lock);

	if (task_table[id].status != TASK_BLOCKED) {
		kprintf("Task %d is not blocked!\n", id);
	} else {
		task_table[id].status = TASK_READY;
		ret = 0;
	}

	if (need_lock)
		spinlock_unlock_irqsave(&table_lock);

	return ret;
}
/* Block a task: move its status from TASK_RUNNING or TASK_READY to
 * TASK_BLOCKED. Takes table_lock only if the calling task does not
 * already hold it, to avoid nested locking.
 * Returns 0 on success, -1 if the task was in neither runnable state. */
int block_task(tid_t id)
{
	int ret = -1;
	/* skip locking when the caller already owns table_lock */
	int need_lock = !spinlock_has_lock(&table_lock);

	/* avoid nested locking */
	if (need_lock)
		spinlock_lock_irqsave(&table_lock);

	if ((task_table[id].status == TASK_RUNNING) || (task_table[id].status == TASK_READY)) {
		task_table[id].status = TASK_BLOCKED;
		ret = 0;
	} else kprintf("Unable to block task %d!\n", id);

	if (need_lock)
		spinlock_unlock_irqsave(&table_lock);

	return ret;
}
void scheduler(void)