diff --git a/kernel/tasks.c b/kernel/tasks.c
index 48182a9b..bf57dda0 100644
--- a/kernel/tasks.c
+++ b/kernel/tasks.c
@@ -52,7 +52,9 @@ static task_t task_table[MAX_TASKS] = { \
 static spinlock_irqsave_t table_lock = SPINLOCK_IRQSAVE_INIT;
 
 DEFINE_PER_CORE(task_t*, current_task, task_table+0);
+#if MAX_CORES > 1
 DEFINE_PER_CORE_STATIC(task_t*, old_task, NULL);
+#endif
 
 /** @brief helper function for the assembly code to determine the current task
  * @return Pointer to the task_t structure of current task
@@ -177,6 +179,7 @@ void NORETURN abort(void) {
  */
 inline static void task_switch_finished(void)
 {
+#if MAX_CORES > 1
 	uint32_t flags = irq_nested_disable();
 
 	// do we already reset the TASK_SWITCH_IN_PROGRESS bit?
@@ -187,6 +190,7 @@ inline static void task_switch_finished(void)
 	}
 
 	irq_nested_enable(flags);
+#endif
 }
 
 /** @brief Create a task with a specific entry point
@@ -839,10 +843,14 @@ void scheduler(void)
 		if ((task_table[new_id].status == TASK_READY) && !(task_table[new_id].flags & TASK_SWITCH_IN_PROGESS)) {
 			if (curr_task->status == TASK_RUNNING) {
 				curr_task->status = TASK_READY;
+#if MAX_CORES > 1
 				curr_task->flags |= TASK_SWITCH_IN_PROGESS;
 				per_core(old_task) = curr_task;
-			} else per_core(old_task) = NULL;
-
+#endif
+			}
+#if MAX_CORES > 1
+			else per_core(old_task) = NULL;
+#endif
 			task_table[new_id].status = TASK_RUNNING;
 			curr_task = per_core(current_task) = table+new_id;
 
@@ -853,8 +861,10 @@ void scheduler(void)
 			// kprintf("task switch %d is in progress\n", new_id);
 		}
 
+#if MAX_CORES > 1
 		// old task will never rescheduled
 		per_core(old_task) = NULL;
+#endif
 
 		if ((curr_task->status == TASK_RUNNING) || (curr_task->status == TASK_IDLE))
			goto get_task_out;
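Note (not part of the patch): the pattern being guarded here is the SMP task-switch handshake. When a core switches away from a task, the outgoing task is marked TASK_SWITCH_IN_PROGESS and remembered in the per-core old_task pointer; task_switch_finished() later clears the mark once the switch has completed, so no other core picks the task up while its context is still being saved. On a single-core build that race cannot occur, so the patch compiles the bookkeeping out with #if MAX_CORES > 1. The following stand-alone sketch only models that handshake; the simplified task_t, the plain old_task variable (per-core in the real kernel), switch_to(), and the omitted interrupt locking are stand-ins introduced for illustration, not the kernel's actual definitions.

/* Illustrative sketch of the switch-in-progress handshake; simplified types,
 * no per-core storage, no real context switch. */
#include <stdint.h>
#include <stdio.h>

#define MAX_CORES               2
#define TASK_SWITCH_IN_PROGESS  (1 << 0)   /* spelling follows the kernel flag */

enum { TASK_READY, TASK_RUNNING };

typedef struct {
	int      id;
	int      status;
	uint32_t flags;
} task_t;

#if MAX_CORES > 1
static task_t *old_task = NULL;   /* per-core in the real kernel */
#endif
static task_t *current_task = NULL;

/* Scheduler side: mark the outgoing task so other cores skip it. */
static void switch_to(task_t *prev, task_t *next)
{
	prev->status = TASK_READY;
#if MAX_CORES > 1
	prev->flags |= TASK_SWITCH_IN_PROGESS;
	old_task = prev;
#endif
	next->status = TASK_RUNNING;
	current_task = next;
	/* ... the low-level context switch would happen here ... */
}

/* Incoming task side: clear the mark once the old context is fully saved. */
static void task_switch_finished(void)
{
#if MAX_CORES > 1
	if (old_task && (old_task->flags & TASK_SWITCH_IN_PROGESS)) {
		old_task->flags &= ~TASK_SWITCH_IN_PROGESS;
		old_task = NULL;
	}
#endif
}

int main(void)
{
	task_t a = { .id = 0, .status = TASK_RUNNING, .flags = 0 };
	task_t b = { .id = 1, .status = TASK_READY,   .flags = 0 };

	current_task = &a;
	switch_to(&a, &b);        /* a is now marked as switch-in-progress */
	task_switch_finished();   /* b's side clears the mark again */

	printf("running task %d, task %d flags = %u\n",
	       current_task->id, a.id, (unsigned)a.flags);
	return 0;
}

With MAX_CORES defined as 1 the preprocessor removes old_task and both guarded sections, which is exactly the effect the patch has on uniprocessor builds.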