avoid races when adding a new task: use IRQ-save spinlocks for the runqueue lock

This commit is contained in:
Stefan Lankes 2011-09-19 19:42:27 +02:00
parent 810c6db0a8
commit c08125c3ad
2 changed files with 35 additions and 46 deletions

View file

@ -135,7 +135,7 @@ typedef struct {
/// a queue for timers
task_list_t timers;
/// lock for this runqueue
spinlock_t lock;
spinlock_irqsave_t lock;
} runqueue_t;
#ifdef __cplusplus

View file

@ -53,11 +53,11 @@ static task_t task_table[MAX_TASKS] = { \
static spinlock_irqsave_t table_lock = SPINLOCK_IRQSAVE_INIT;
#if MAX_CORES > 1
static runqueue_t runqueues[MAX_CORES] = { \
[0] = {task_table+0, NULL, 0, 0, 0, 0, 0, {[0 ... MAX_PRIO-1] = {NULL, NULL}}, {NULL, NULL}, SPINLOCK_INIT}, \
[1 ... MAX_CORES-1] = {NULL, NULL, 0, 0, 0, 0, 0, {[0 ... MAX_PRIO-1] = {NULL, NULL}}, {NULL, NULL}, SPINLOCK_INIT}};
[0] = {task_table+0, NULL, 0, 0, 0, 0, 0, {[0 ... MAX_PRIO-1] = {NULL, NULL}}, {NULL, NULL}, SPINLOCK_IRQSAVE_INIT}, \
[1 ... MAX_CORES-1] = {NULL, NULL, 0, 0, 0, 0, 0, {[0 ... MAX_PRIO-1] = {NULL, NULL}}, {NULL, NULL}, SPINLOCK_IRQSAVE_INIT}};
#else
static runqueue_t runqueues[1] = { \
[0] = {task_table+0, NULL, 0, 0, 0, 0, 0, {[0 ... MAX_PRIO-1] = {NULL, NULL}}, {NULL, NULL}, SPINLOCK_INIT}};
[0] = {task_table+0, NULL, 0, 0, 0, 0, 0, {[0 ... MAX_PRIO-1] = {NULL, NULL}}, {NULL, NULL}, SPINLOCK_IRQSAVE_INIT}};
#endif
DEFINE_PER_CORE(task_t*, current_task, task_table+0);
@ -126,22 +126,22 @@ static void finish_task_switch(uint32_t irq)
uint32_t core_id = CORE_ID;
task_t* old;
spinlock_lock(&runqueues[core_id].lock);
spinlock_irqsave_lock(&runqueues[core_id].lock);
if ((old = runqueues[core_id].old_task) != NULL) {
prio = old->prio;
if (!runqueues[core_id].queue[prio-1].first) {
old->prev = NULL;
old->next = old->prev = NULL;
runqueues[core_id].queue[prio-1].first = runqueues[core_id].queue[prio-1].last = old;
} else {
old->next = NULL;
old->prev = runqueues[core_id].queue[prio-1].last;
runqueues[core_id].queue[prio-1].last->next = old;
runqueues[core_id].queue[prio-1].last = old;
}
runqueues[core_id].old_task = NULL;
runqueues[core_id].prio_bitmap |= (1 << prio);
old->next = NULL;
}
spinlock_unlock(&runqueues[core_id].lock);
spinlock_irqsave_unlock(&runqueues[core_id].lock);
if (irq)
irq_enable();
@ -208,9 +208,9 @@ static void NORETURN do_exit(int arg) {
// decrease the number of active tasks
flags = irq_nested_disable();
core_id = CORE_ID;
spinlock_lock(&runqueues[core_id].lock);
spinlock_irqsave_lock(&runqueues[core_id].lock);
runqueues[core_id].nr_tasks--;
spinlock_unlock(&runqueues[core_id].lock);
spinlock_irqsave_unlock(&runqueues[core_id].lock);
irq_nested_enable(flags);
reschedule();
@ -299,23 +299,20 @@ static int create_task(tid_t* id, internal_entry_point_t ep, void* arg, uint8_t
task_table[i].start_tick = get_clock_tick();
// add task in the runqueue
spinlock_lock(&runqueues[core_id].lock);
spinlock_irqsave_lock(&runqueues[core_id].lock);
runqueues[core_id].prio_bitmap |= (1 << prio);
runqueues[core_id].nr_tasks++;
if (!runqueues[core_id].queue[prio-1].first) {
//kprintf("add task %d at the begin of queue %d\n", i, prio-1);
task_table[i].prev = NULL;
task_table[i].next = task_table[i].prev = NULL;
runqueues[core_id].queue[prio-1].first = task_table+i;
runqueues[core_id].queue[prio-1].last = task_table+i;
task_table[i].next = NULL;
} else {
//kprintf("add task %d at the end of queue %d, first task %d\n", i, prio-1, runqueues[core_id].queue[prio-1].first->id);
task_table[i].prev = runqueues[core_id].queue[prio-1].last;
task_table[i].next = NULL;
runqueues[core_id].queue[prio-1].last->next = task_table+i;
runqueues[core_id].queue[prio-1].last = task_table+i;
task_table[i].next = NULL;
}
spinlock_unlock(&runqueues[core_id].lock);
spinlock_irqsave_unlock(&runqueues[core_id].lock);
break;
}
}
@ -392,21 +389,20 @@ int sys_fork(void)
task_table[i].last_core = parent_task->last_core;
// add task in the runqueue
spinlock_lock(&runqueues[core_id].lock);
spinlock_irqsave_lock(&runqueues[core_id].lock);
runqueues[core_id].prio_bitmap |= (1 << parent_task->prio);
runqueues[core_id].nr_tasks++;
if (!runqueues[core_id].queue[parent_task->prio-1].first) {
task_table[i].prev = NULL;
task_table[i].next = task_table[i].prev = NULL;
runqueues[core_id].queue[parent_task->prio-1].first = task_table+i;
runqueues[core_id].queue[parent_task->prio-1].last = task_table+i;
task_table[i].next = NULL;
} else {
task_table[i].prev = runqueues[core_id].queue[parent_task->prio-1].last;
task_table[i].next = NULL;
runqueues[core_id].queue[parent_task->prio-1].last->next = task_table+i;
runqueues[core_id].queue[parent_task->prio-1].last = task_table+i;
task_table[i].next = NULL;
}
spinlock_unlock(&runqueues[core_id].lock);
spinlock_irqsave_unlock(&runqueues[core_id].lock);
ret = arch_fork(task_table+i);
@ -912,7 +908,7 @@ int wakeup_task(tid_t id)
task->status = TASK_READY;
ret = 0;
spinlock_lock(&runqueues[core_id].lock);
spinlock_irqsave_lock(&runqueues[core_id].lock);
// increase the number of ready tasks
runqueues[core_id].nr_tasks++;
@ -940,7 +936,7 @@ int wakeup_task(tid_t id)
runqueues[core_id].queue[prio-1].last->next = task;
runqueues[core_id].queue[prio-1].last = task;
}
spinlock_unlock(&runqueues[core_id].lock);
spinlock_irqsave_unlock(&runqueues[core_id].lock);
}
irq_nested_enable(flags);
@ -975,7 +971,7 @@ int block_current_task(void)
task_table[id].status = TASK_BLOCKED;
ret = 0;
spinlock_lock(&runqueues[core_id].lock);
spinlock_irqsave_lock(&runqueues[core_id].lock);
// reduce the number of ready tasks
runqueues[core_id].nr_tasks--;
@ -996,7 +992,7 @@ int block_current_task(void)
if (!runqueues[core_id].queue[prio-1].first)
runqueues[core_id].prio_bitmap &= ~(1 << prio);
spinlock_unlock(&runqueues[core_id].lock);
spinlock_irqsave_unlock(&runqueues[core_id].lock);
}
irq_nested_enable(flags);
@ -1024,7 +1020,7 @@ int set_timer(uint64_t deadline)
curr_task->flags |= TASK_TIMER;
ret = 0;
spinlock_lock(&runqueues[core_id].lock);
spinlock_irqsave_lock(&runqueues[core_id].lock);
// reduce the number of ready tasks
runqueues[core_id].nr_tasks--;
@ -1075,7 +1071,7 @@ int set_timer(uint64_t deadline)
}
}
spinlock_unlock(&runqueues[core_id].lock);
spinlock_irqsave_unlock(&runqueues[core_id].lock);
} else kprintf("Task is already blocked. No timer will be set!\n");
irq_nested_enable(flags);
@ -1097,11 +1093,11 @@ void update_load(void)
if (runqueues[core_id].load_counter < 0) {
runqueues[core_id].load_counter += 5*TIMER_FREQ;
spinlock_lock(&runqueues[core_id].lock);
spinlock_irqsave_lock(&runqueues[core_id].lock);
runqueues[core_id].load *= EXP;
runqueues[core_id].load += runqueues[core_id].nr_tasks*(FIXED_1-EXP);
runqueues[core_id].load >>= FSHIFT;
spinlock_unlock(&runqueues[core_id].lock);
spinlock_irqsave_unlock(&runqueues[core_id].lock);
//kprintf("load of core %u: %u, %u\n", core_id, runqueues[core_id].load, runqueues[core_id].nr_tasks);
}
@ -1224,11 +1220,8 @@ void scheduler(void)
curr_task->last_core = core_id;
/* signalizes that this task could be reused */
if (curr_task->status == TASK_FINISHED) {
if (curr_task->status == TASK_FINISHED)
curr_task->status = TASK_INVALID;
//kprintf("finished task %d, runqueues[%d].idle->id = %d\n", curr_task->id, core_id, runqueues[core_id].idle->id);
//kprintf("msb %d\n", msb(runqueues[core_id].prio_bitmap));
}
/* if the task is using the FPU, we need to save the FPU context */
if (curr_task->flags & TASK_FPU_USED) {
@ -1236,7 +1229,7 @@ void scheduler(void)
curr_task->flags &= ~TASK_FPU_USED;
}
spinlock_lock(&runqueues[core_id].lock);
spinlock_irqsave_lock(&runqueues[core_id].lock);
// check timers
current_tick = get_clock_tick();
@ -1298,16 +1291,11 @@ void scheduler(void)
}
curr_task = per_core(current_task) = runqueues[core_id].queue[prio-1].first;
/*if (curr_task->status == TASK_INVALID) {
task_t* tmp = curr_task;
kprintf("Upps..., got invalid task %d, orig task %d\n", curr_task->id, orig_task->id);
//kputs("Dump all tasks: ");
//while (tmp != NULL) {
// kprintf("%d ", tmp->id);
// tmp = tmp->next;
//}
//kputs("\n");
}*/
if (BUILTIN_EXPECT(curr_task->status == TASK_INVALID, 0)) {
pushbg(COL_RED);
kprintf("Upps!!!!!!! Got invalid task %d, orig task %d\n", curr_task->id, orig_task->id);
popbg();
}
curr_task->status = TASK_RUNNING;
// remove new task from queue
@ -1316,10 +1304,11 @@ void scheduler(void)
runqueues[core_id].queue[prio-1].last = NULL;
runqueues[core_id].prio_bitmap &= ~(1 << prio);
}
curr_task->next = curr_task->prev = NULL;
}
get_task_out:
spinlock_unlock(&runqueues[core_id].lock);
spinlock_irqsave_unlock(&runqueues[core_id].lock);
if (curr_task != orig_task) {
//kprintf("schedule from %u to %u with prio %u on core %u\n",