use table_lock to avoid GPs during a task switch

Stefan Lankes 2011-08-02 10:24:17 +02:00
parent 744abc36c8
commit 026d5e264c
2 changed files with 14 additions and 53 deletions
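
The change replaces a per-task "switch in progress" flag with a lock hand-off: the scheduler takes the global table_lock before it picks the next task and, on SMP builds, keeps it held across switch_task(), so no other core can schedule a task whose register state has not been fully saved yet (the source of the general protection faults in the title). The lock is released on the other side of the switch: by the incoming task once its own switch_task() call returns, or, for a task that runs for the very first time and has no switch_task() call to return from, by the new start_first_time_slice() helper at the top of its entry code. A minimal compilable sketch of this hand-off pattern; all types and stubs below are simplified stand-ins, not the kernel's actual implementation:

#include <stdio.h>

/* simplified stand-in for the kernel's irqsave spinlock (not atomic; illustration only) */
typedef struct { volatile int locked; } spinlock_irqsave_t;
static spinlock_irqsave_t table_lock = { 0 };

static void spinlock_irqsave_lock(spinlock_irqsave_t* s)   { while (s->locked) ; s->locked = 1; }
static void spinlock_irqsave_unlock(spinlock_irqsave_t* s) { s->locked = 0; }

/* stand-in for the real context switch; in the kernel, control only comes
 * back here once this task has been switched in again */
static void switch_task(unsigned int id) { printf("switch to task %u, lock still held\n", id); }

/* a task getting its first time slice starts in its entry stub instead of
 * returning from switch_task(), so the stub has to drop the lock itself */
static void start_first_time_slice(void) { spinlock_irqsave_unlock(&table_lock); }

static void scheduler(void)
{
	spinlock_irqsave_lock(&table_lock);   /* taken before task selection */
	unsigned int new_id = 1;              /* task selection elided */
	switch_task(new_id);                  /* lock stays held across the switch */
	/* control returns here once another scheduler invocation switches us
	 * back in; that invocation still holds table_lock, so release it now */
	spinlock_irqsave_unlock(&table_lock);
}

int main(void)
{
	scheduler();                /* ordinary switch: released after resume */
	spinlock_irqsave_lock(&table_lock);
	start_first_time_slice();   /* first time slice: entry stub releases */
	return 0;
}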

@@ -50,7 +50,6 @@ extern "C" {
 #define TASK_DEFAULT_FLAGS 0
 #define TASK_FPU_INIT (1 << 0)
 #define TASK_FPU_USED (1 << 1)
-#define TASK_SWITCH_IN_PROGESS (1 << 2)
 typedef int (*entry_point_t)(void*);
 typedef int (STDCALL *internal_entry_point_t)(void*);

@@ -149,7 +149,7 @@ static void NORETURN do_exit(int arg) {
 	curr_task->status = TASK_FINISHED;
 	reschedule();
-	kputs("Kernel panic: scheduler found no valid task\n");
+	kprintf("Kernel panic: scheduler on core %d found no valid task\n", CORE_ID);
 	while(1) {
 		HALT;
 	}
@@ -174,22 +174,13 @@ void NORETURN abort(void) {
 }
 /*
- * @brief: if the task switch is finished, we reset
- * the TASK_SWITCH_IN_PROGRESS bit
+ * @brief: if the task gets the first time slice,
+ * the table_lock is hold and have to be released.
  */
-inline static void task_switch_finished(void)
+inline static void start_first_time_slice(void)
 {
 #if MAX_CORES > 1
-	uint32_t flags = irq_nested_disable();
-	// do we already reset the TASK_SWITCH_IN_PROGRESS bit?
-	task_t* old = per_core(old_task);
-	if (old) {
-		old->flags &= ~TASK_SWITCH_IN_PROGESS;
-		per_core(old_task) = NULL;
-	}
-	irq_nested_enable(flags);
+	spinlock_irqsave_unlock(&table_lock);
 #endif
 }
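
On a single-core build the lock operations compile away (the same MAX_CORES > 1 guard brackets them in scheduler()), so the helper only matters for SMP. What it encodes is lock ownership crossing a context switch; a hypothetical timeline for one core (illustration, not taken from the source):

/*
 *   task A: scheduler()        -> spinlock_irqsave_lock(&table_lock)
 *   task A: switch_task(B)     -> A's context gets saved, lock still held
 *   task B: first instruction  -> start_first_time_slice()
 *                              -> spinlock_irqsave_unlock(&table_lock)
 *
 * Between lock and unlock no other core can pick task A, so A's
 * half-saved state can never be loaded elsewhere.
 */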
@@ -314,10 +305,7 @@ int sys_fork(void)
 		// Leave the function without releasing the locks
 		// because the locks are already released
 		// by the parent task!
-		// first switch to the new current task
-		// => signalizes a successful task switch
-		task_switch_finished();
+		start_first_time_slice();
 		return 0;
 	}
@@ -352,9 +340,7 @@ static int STDCALL kernel_entry(void* args)
 	int ret;
 	kernel_args_t* kernel_args = (kernel_args_t*) args;
-	// first switch to the new current task
-	// => signalizes a successful task switch
-	task_switch_finished();
+	start_first_time_slice();
 	if (BUILTIN_EXPECT(!kernel_args, 0))
 		return -EINVAL;
@@ -595,9 +581,7 @@ static int STDCALL user_entry(void* arg)
 {
 	int ret;
-	// first switch to the new current task
-	// => signalizes a successful task switch
-	task_switch_finished();
+	start_first_time_slice();
 	if (BUILTIN_EXPECT(!arg, 0))
 		return -EINVAL;
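
All three first-run paths (the fork child in sys_fork(), kernel_entry(), user_entry()) follow the same rule: releasing the scheduler's lock is the first thing a task does in its first time slice, before any of the task body runs. Schematically (attributes and error handling elided; not a verbatim excerpt):

/*
 *   static int STDCALL kernel_entry(void* args)
 *   {
 *       start_first_time_slice();  // drop the table_lock the scheduler still holds
 *       ...                        // only now run the actual task body
 *   }
 */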
@@ -816,11 +800,6 @@ void scheduler(void)
 	unsigned int i;
 	unsigned int new_id;
-	// let's play it save
-	// => check if we already signalizes that the previous switch
-	//    is finished
-	task_switch_finished();
 #if MAX_CORES > 1
 	spinlock_irqsave_lock(&table_lock);
 #endif
@@ -840,32 +819,16 @@ void scheduler(void)
 	for(i=1, new_id=(curr_task->id + 1) % MAX_TASKS;
 		i<MAX_TASKS; i++, new_id=(new_id+1) % MAX_TASKS)
 	{
-		if ((task_table[new_id].status == TASK_READY) && !(task_table[new_id].flags & TASK_SWITCH_IN_PROGESS)) {
-			if (curr_task->status == TASK_RUNNING) {
+		if (task_table[new_id].status == TASK_READY) {
+			if (curr_task->status == TASK_RUNNING)
 				curr_task->status = TASK_READY;
-#if MAX_CORES > 1
-				curr_task->flags |= TASK_SWITCH_IN_PROGESS;
-				per_core(old_task) = curr_task;
-#endif
-			}
-#if MAX_CORES > 1
-			else per_core(old_task) = NULL;
-#endif
 			task_table[new_id].status = TASK_RUNNING;
 			curr_task = per_core(current_task) = task_table+new_id;
 			goto get_task_out;
 		}
-		//if ((task_table[new_id].status == TASK_READY) && (task_table[new_id].flags & TASK_SWITCH_IN_PROGESS))
-		//	kprintf("task switch %d is in progress\n", new_id);
 	}
-#if MAX_CORES > 1
-	// old task will never rescheduled
-	per_core(old_task) = NULL;
-#endif
 	if ((curr_task->status == TASK_RUNNING) || (curr_task->status == TASK_IDLE))
 		goto get_task_out;
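
Because table_lock is now held for the whole pick-and-switch sequence, the per-task TASK_SWITCH_IN_PROGESS bit and the per_core(old_task) bookkeeping become redundant, and the scan over task_table shrinks to a plain status check. The window the flag used to guard looked roughly like this (illustrative trace, not from the source):

/*
 *   core 0: switches away from task A; A is marked TASK_READY before
 *           its register state has been completely saved
 *   core 1: scheduler scan finds A READY and switches to it, loading a
 *           half-written context -> general protection fault
 *
 * Holding table_lock until the switch has finished closes this window
 * without per-task flags.
 */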
@@ -878,14 +841,13 @@ void scheduler(void)
 get_task_out:
 	//kprintf("schedule %d on core %d\n", per_core(current_task)->id, smp_id());
-	if (curr_task != orig_task)
-		switch_task(new_id);
 #if MAX_CORES > 1
 	spinlock_irqsave_unlock(&table_lock);
 #endif
+	if (curr_task != orig_task) {
+		switch_task(new_id);
+		task_switch_finished();
+	}
 }

 void reschedule(void)