/*
 * Copyright (c) 2010, Stefan Lankes, RWTH Aachen University
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *    * Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    * Redistributions in binary form must reproduce the above copyright
 *      notice, this list of conditions and the following disclaimer in the
 *      documentation and/or other materials provided with the distribution.
 *    * Neither the name of the University nor the names of its contributors
 *      may be used to endorse or promote products derived from this
 *      software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <hermit/stddef.h>
#include <hermit/stdlib.h>
#include <hermit/stdio.h>
#include <hermit/string.h>
#include <hermit/tasks.h>
#include <hermit/tasks_types.h>
#include <hermit/spinlock.h>
#include <hermit/time.h>
#include <hermit/errno.h>
#include <hermit/syscall.h>
#include <hermit/memory.h>
#include <hermit/logging.h>

#include <asm/processor.h>

/*
 * Note that linker symbols are not variables; they have no memory allocated
 * for maintaining a value. Rather, their address is their value.
 */
extern atomic_int32_t cpu_online;

volatile uint32_t go_down = 0;

/** @brief Array of task structures (aka PCB)
 *
 * A task's id will be its position in this array.
 */
static task_t task_table[MAX_TASKS] = { \
	[0] = {0, TASK_IDLE, 0, NULL, NULL, NULL, TASK_DEFAULT_FLAGS, 0, 0, 0, 0, NULL, 0, NULL, NULL, 0, 0, 0, NULL, FPU_STATE_INIT}, \
	[1 ... MAX_TASKS-1] = {0, TASK_INVALID, 0, NULL, NULL, NULL, TASK_DEFAULT_FLAGS, 0, 0, 0, 0, NULL, 0, NULL, NULL, 0, 0, 0, NULL, FPU_STATE_INIT}};

static spinlock_irqsave_t table_lock = SPINLOCK_IRQSAVE_INIT;

#if MAX_CORES > 1
static readyqueues_t readyqueues[MAX_CORES] = { \
	[0 ... MAX_CORES-1] = {NULL, NULL, 0, 0, 0, {[0 ... MAX_PRIO-2] = {NULL, NULL}}, {NULL, NULL}, SPINLOCK_IRQSAVE_INIT}};
#else
static readyqueues_t readyqueues[1] = {[0] = {task_table+0, NULL, 0, 0, 0, {[0 ... MAX_PRIO-2] = {NULL, NULL}}, {NULL, NULL}, SPINLOCK_IRQSAVE_INIT}};
#endif

DEFINE_PER_CORE(task_t*, current_task, task_table+0);

#if MAX_CORES > 1
DEFINE_PER_CORE(uint32_t, __core_id, 0);
#endif
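
/** @brief Reprogram the oneshot timer for the head of the timer queue
 *
 * If the head's timeout already passed, a minimal deadline is used so it
 * still gets serviced; without a queued task, the timer is disabled.
 */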
static void update_timer(task_t* first)
{
	if (first) {
		if(first->timeout > get_clock_tick()) {
			timer_deadline((uint32_t) (first->timeout - get_clock_tick()));
		} else {
			// workaround: start the timer so the new head will be serviced
			timer_deadline(1);
		}
	} else {
		// prevent spurious interrupts
		timer_disable();
	}
}
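
/** @brief Remove a task from a core's timer queue
 *
 * Callers are expected to hold the corresponding readyqueues lock.
 */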
static void timer_queue_remove(uint32_t core_id, task_t* task)
{
	if(BUILTIN_EXPECT(!task, 0)) {
		return;
	}

	task_list_t* timer_queue = &readyqueues[core_id].timers;

#ifdef DYNAMIC_TICKS
	// if task is first in timer queue, we need to update the oneshot
	// timer for the next task
	if(timer_queue->first == task) {
		update_timer(task->next);
	}
#endif

	task_list_remove_task(timer_queue, task);
}
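
/** @brief Insert a task into a core's timer queue, sorted by timeout
 *
 * Takes the readyqueues lock itself; if the task becomes the new queue
 * head, the oneshot timer is reprogrammed.
 */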
static void timer_queue_push(uint32_t core_id, task_t* task)
{
	task_list_t* timer_queue = &readyqueues[core_id].timers;

	spinlock_irqsave_lock(&readyqueues[core_id].lock);

	task_t* first = timer_queue->first;

	if(!first) {
		timer_queue->first = timer_queue->last = task;
		task->next = task->prev = NULL;

#ifdef DYNAMIC_TICKS
		update_timer(task);
#endif
	} else {
		// look up the position where to insert the task, keeping the
		// queue sorted by ascending timeout
		task_t* tmp = first;
		while(tmp && (task->timeout >= tmp->timeout))
			tmp = tmp->next;

		if(!tmp) {
			// insert at the end of the queue
			task->next = NULL;
			task->prev = timer_queue->last;

			// there has to be a last element because there is also a first one
			timer_queue->last->next = task;
			timer_queue->last = task;
		} else {
			task->next = tmp;
			task->prev = tmp->prev;
			tmp->prev = task;

			if(task->prev)
				task->prev->next = task;

			if(timer_queue->first == tmp) {
				timer_queue->first = task;

#ifdef DYNAMIC_TICKS
				update_timer(task);
#endif
			}
		}
	}

	spinlock_irqsave_unlock(&readyqueues[core_id].lock);
}
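
/** @brief Append a task to the ready queue of its priority class
 *
 * Callers are expected to hold the readyqueues lock of the given core.
 */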
static inline void readyqueues_push_back(uint32_t core_id, task_t* task)
{
	// idle task (prio=0) doesn't have a queue
	task_list_t* readyqueue = &readyqueues[core_id].queue[task->prio - 1];

	task_list_push_back(readyqueue, task);

	// update priority bitmap
	readyqueues[core_id].prio_bitmap |= (1 << task->prio);
}
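
/** @brief Remove a task from the ready queue of its priority class
 *
 * Callers are expected to hold the readyqueues lock of the given core.
 */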
static inline void readyqueues_remove(uint32_t core_id, task_t* task)
{
	// idle task (prio=0) doesn't have a queue
	task_list_t* readyqueue = &readyqueues[core_id].queue[task->prio - 1];

	task_list_remove_task(readyqueue, task);

	// no valid task in queue => update priority bitmap
	if (readyqueue->first == NULL)
		readyqueues[core_id].prio_bitmap &= ~(1 << task->prio);
}
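
/** @brief Lazily switch FPU ownership to the current task
 *
 * Saves the FPU state of the previous owner (if any) and restores the
 * state of the current task, initializing it on first use.
 */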
void fpu_handler(void)
{
	task_t* task = per_core(current_task);
	uint32_t core_id = CORE_ID;

	task->flags |= TASK_FPU_USED;

	if (!(task->flags & TASK_FPU_INIT)) {
		// task uses the FPU for the first time => initialize the FPU state
		fpu_init(&task->fpu);
		task->flags |= TASK_FPU_INIT;
	}

	if (readyqueues[core_id].fpu_owner == task->id)
		return;

	spinlock_irqsave_lock(&readyqueues[core_id].lock);
	// did another task already use the FPU? => save its FPU state
	if (readyqueues[core_id].fpu_owner) {
		save_fpu_state(&(task_table[readyqueues[core_id].fpu_owner].fpu));
		task_table[readyqueues[core_id].fpu_owner].flags &= ~TASK_FPU_USED;
	}
	readyqueues[core_id].fpu_owner = task->id;
	spinlock_irqsave_unlock(&readyqueues[core_id].lock);

	restore_fpu_state(&task->fpu);
}
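
/** @brief Check whether this core has at least one ready task */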
int is_task_available(void)
{
	uint32_t core_id = CORE_ID;

	return readyqueues[core_id].nr_tasks > 0 ? 1 : 0;
}
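
/** @brief Trigger a reschedule if a higher-priority task is ready
 *
 * With DYNAMIC_TICKS, also reschedules among tasks of equal priority
 * once the current task has consumed its time slice (round robin).
 */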
void check_scheduling(void)
{
	uint32_t prio = get_highest_priority();
	task_t* curr_task = per_core(current_task);

	if (prio > curr_task->prio) {
		reschedule();
#ifdef DYNAMIC_TICKS
	} else if ((prio > 0) && (prio == curr_task->prio)) {
		// a task of equal priority is ready => check whether the current
		// task has already run for one tick (one time slice) and, if so,
		// reschedule to realize round robin

		const uint64_t diff_cycles = get_rdtsc() - curr_task->last_tsc;
		const uint64_t cpu_freq_hz = 1000000ULL * (uint64_t) get_cpu_frequency();

		if (((diff_cycles * (uint64_t) TIMER_FREQ) / cpu_freq_hz) > 0) {
			LOG_DEBUG("Time slice expired for task %d on core %d. New task has priority %u.\n", curr_task->id, CORE_ID, prio);
			reschedule();
		}
#endif
	}
}
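
/** @brief Determine the highest priority among this core's ready tasks
 *
 * Returns 0 (idle priority) if no task is ready.
 */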
uint32_t get_highest_priority(void)
{
	uint32_t prio = msb(readyqueues[CORE_ID].prio_bitmap);

	if (prio > MAX_PRIO)
		return 0;
	return prio;
}

/** @brief Return the ready queues of the current core */
void* get_readyqueue(void)
{
	return &readyqueues[CORE_ID];
}
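
/** @brief Initialize the multitasking subsystem
 *
 * Registers task 0 as the idle task of the current core.
 */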
int multitasking_init(void)
{
	uint32_t core_id = CORE_ID;

	if (BUILTIN_EXPECT(task_table[0].status != TASK_IDLE, 0)) {
		LOG_ERROR("Task 0 is not an idle task\n");
		return -ENOMEM;
	}

	task_table[0].prio = IDLE_PRIO;
	task_table[0].stack = NULL;	// will be initialized later
	task_table[0].ist_addr = NULL;	// will be initialized later
	set_per_core(current_task, task_table+0);

	readyqueues[core_id].idle = task_table+0;

	return 0;
}
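
/** @brief Set the boot stack and interrupt stack of an idle task
 *
 * @return 0 on success, -EINVAL if id is not below MAX_CORES
 */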
int set_boot_stack(tid_t id, size_t stack, size_t ist_addr)
{
	if (id < MAX_CORES) {
		task_table[id].stack = (void*) stack;
		task_table[id].ist_addr = (void*) ist_addr;

		return 0;
	}

	return -EINVAL;
}
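
/** @brief Claim a free task slot as the idle task of the current core
 *
 * @return the id of the new idle task, ~0 if the task table is full
 */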
tid_t set_idle_task(void)
{
	uint32_t core_id = CORE_ID;
	tid_t id = ~0;

	spinlock_irqsave_lock(&table_lock);

	for(uint32_t i=0; i<MAX_TASKS; i++) {
		if (task_table[i].status == TASK_INVALID) {
			task_table[i].id = id = i;
			task_table[i].status = TASK_IDLE;
			task_table[i].last_core = core_id;
			task_table[i].last_stack_pointer = NULL;
			task_table[i].stack = NULL;
			task_table[i].ist_addr = NULL;
			task_table[i].prio = IDLE_PRIO;
			task_table[i].heap = NULL;
			readyqueues[core_id].idle = task_table+i;
			set_per_core(current_task, readyqueues[core_id].idle);

			break;
		}
	}

	spinlock_irqsave_unlock(&table_lock);

	return id;
}
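
/** @brief Complete a task switch on the new task's stack
 *
 * Cleans up the previous task if it has finished, or re-enqueues it
 * into its ready queue otherwise.
 */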
void finish_task_switch(void)
{
	task_t* old;
	const uint32_t core_id = CORE_ID;

	spinlock_irqsave_lock(&readyqueues[core_id].lock);

	if ((old = readyqueues[core_id].old_task) != NULL) {
		readyqueues[core_id].old_task = NULL;

		if (old->status == TASK_FINISHED) {
			/* cleanup task */
			if (old->stack) {
				//LOG_INFO("Release stack at 0x%zx\n", old->stack);
				destroy_stack(old->stack, DEFAULT_STACK_SIZE);
				old->stack = NULL;
			}

			if (!old->parent && old->heap) {
				kfree(old->heap);
				old->heap = NULL;
			}

			if (old->ist_addr) {
				destroy_stack(old->ist_addr, KERNEL_STACK_SIZE);
				old->ist_addr = NULL;
			}

			old->last_stack_pointer = NULL;

			if (readyqueues[core_id].fpu_owner == old->id)
				readyqueues[core_id].fpu_owner = 0;

			/* signals that this task slot can be reused */
			old->status = TASK_INVALID;
		} else {
			// re-enqueue old task
			readyqueues_push_back(core_id, old);
		}
	}

	spinlock_irqsave_unlock(&readyqueues[core_id].lock);
}
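
/** @brief Terminate the current task
 *
 * Releases its resources, marks it as finished, and hands the core to
 * the scheduler. This function never returns.
 */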
void NORETURN do_exit(int arg)
{
	task_t* curr_task = per_core(current_task);
	const uint32_t core_id = CORE_ID;

	LOG_DEBUG("Terminate task: %u, return value %d\n", curr_task->id, arg);

	uint8_t flags = irq_nested_disable();

	// decrease the number of active tasks
	spinlock_irqsave_lock(&readyqueues[core_id].lock);
	readyqueues[core_id].nr_tasks--;
	spinlock_irqsave_unlock(&readyqueues[core_id].lock);

	// release the thread local storage
	destroy_tls();

	curr_task->status = TASK_FINISHED;

	reschedule();

	irq_nested_enable(flags);

	LOG_ERROR("Kernel panic: scheduler found no valid task\n");
	while(1) {
		HALT;
	}
}
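
/** @brief Exit point of kernel tasks: terminates the calling task */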
void NORETURN leave_kernel_task(void) {
	int result;

	result = 0; //get_return_value();
	do_exit(result);
}

/** @brief Abort the current task */
void NORETURN do_abort(void) {
	do_exit(-1);
}
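
/** @brief Round-robin selection of the core for the next new thread
 *
 * @return a core id with a registered idle task, or MAX_CORES on error
 */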
static uint32_t get_next_core_id(void)
{
	uint32_t i;
	static uint32_t core_id = MAX_CORES;

	if (core_id >= MAX_CORES)
		core_id = CORE_ID;

	// we assume OpenMP applications
	// => number of threads is (normally) equal to the number of cores
	// => search next available core
	for(i=0, core_id=(core_id+1)%MAX_CORES; i<2*MAX_CORES; i++, core_id=(core_id+1)%MAX_CORES)
		if (readyqueues[core_id].idle)
			break;

	if (BUILTIN_EXPECT(!readyqueues[core_id].idle, 0)) {
		LOG_ERROR("BUG: no core available!\n");
		return MAX_CORES;
	}

	return core_id;
}
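
/** @brief Create a thread that shares heap and TLS with the current task
 *
 * The new task is placed on the next available core (round robin).
 *
 * @return 0 on success, a negative error code on failure
 */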
int clone_task(tid_t* id, entry_point_t ep, void* arg, uint8_t prio)
{
	int ret = -EINVAL;
	uint32_t i;
	void* stack = NULL;
	void* ist = NULL;
	task_t* curr_task;
	uint32_t core_id;

	if (BUILTIN_EXPECT(!ep, 0))
		return -EINVAL;
	if (BUILTIN_EXPECT(prio == IDLE_PRIO, 0))
		return -EINVAL;
	if (BUILTIN_EXPECT(prio > MAX_PRIO, 0))
		return -EINVAL;

	curr_task = per_core(current_task);

	stack = create_stack(DEFAULT_STACK_SIZE);
	if (BUILTIN_EXPECT(!stack, 0))
		return -ENOMEM;

	ist = create_stack(KERNEL_STACK_SIZE);
	if (BUILTIN_EXPECT(!ist, 0)) {
		destroy_stack(stack, DEFAULT_STACK_SIZE);
		return -ENOMEM;
	}

	spinlock_irqsave_lock(&table_lock);

	core_id = get_next_core_id();
	if (BUILTIN_EXPECT(core_id >= MAX_CORES, 0))
	{
		spinlock_irqsave_unlock(&table_lock);
		ret = -EINVAL;
		goto out;
	}

	for(i=0; i<MAX_TASKS; i++) {
		if (task_table[i].status == TASK_INVALID) {
			task_table[i].id = i;
			task_table[i].status = TASK_READY;
			task_table[i].last_core = core_id;
			task_table[i].last_stack_pointer = NULL;
			task_table[i].stack = stack;
			task_table[i].prio = prio;
			task_table[i].heap = curr_task->heap;
			task_table[i].start_tick = get_clock_tick();
			task_table[i].last_tsc = 0;
			task_table[i].parent = curr_task->id;
			task_table[i].tls_addr = curr_task->tls_addr;
			task_table[i].tls_size = curr_task->tls_size;
			task_table[i].ist_addr = ist;
			task_table[i].lwip_err = 0;
			task_table[i].signal_handler = NULL;

			if (id)
				*id = i;

			ret = create_default_frame(task_table+i, ep, arg, core_id);
			if (ret) {
				// drop the table lock before bailing out; the out
				// path does not release it
				spinlock_irqsave_unlock(&table_lock);
				goto out;
			}

			// add task to the ready queues
			spinlock_irqsave_lock(&readyqueues[core_id].lock);
			readyqueues[core_id].prio_bitmap |= (1 << prio);
			readyqueues[core_id].nr_tasks++;
			if (!readyqueues[core_id].queue[prio-1].first) {
				task_table[i].next = task_table[i].prev = NULL;
				readyqueues[core_id].queue[prio-1].first = task_table+i;
				readyqueues[core_id].queue[prio-1].last = task_table+i;
			} else {
				task_table[i].prev = readyqueues[core_id].queue[prio-1].last;
				task_table[i].next = NULL;
				readyqueues[core_id].queue[prio-1].last->next = task_table+i;
				readyqueues[core_id].queue[prio-1].last = task_table+i;
			}
			// should we wakeup the core?
			if (readyqueues[core_id].nr_tasks == 1)
				wakeup_core(core_id);
			spinlock_irqsave_unlock(&readyqueues[core_id].lock);
			break;
		}
	}

	spinlock_irqsave_unlock(&table_lock);

	if (!ret) {
		LOG_DEBUG("start new thread %d on core %d with stack address %p\n", i, core_id, stack);
	}

out:
	if (ret) {
		destroy_stack(stack, DEFAULT_STACK_SIZE);
		destroy_stack(ist, KERNEL_STACK_SIZE);
	}

	return ret;
}
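
/** @brief Create a new task with its own stacks on a given core
 *
 * @return 0 on success, a negative error code on failure
 */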
int create_task(tid_t* id, entry_point_t ep, void* arg, uint8_t prio, uint32_t core_id)
{
	int ret = -ENOMEM;
	uint32_t i;
	void* stack = NULL;
	void* ist = NULL;
	void* counter = NULL;

	if (BUILTIN_EXPECT(!ep, 0))
		return -EINVAL;
	if (BUILTIN_EXPECT(prio == IDLE_PRIO, 0))
		return -EINVAL;
	if (BUILTIN_EXPECT(prio > MAX_PRIO, 0))
		return -EINVAL;
	if (BUILTIN_EXPECT(core_id >= MAX_CORES, 0))
		return -EINVAL;
	if (BUILTIN_EXPECT(!readyqueues[core_id].idle, 0))
		return -EINVAL;

	stack = create_stack(DEFAULT_STACK_SIZE);
	if (BUILTIN_EXPECT(!stack, 0))
		return -ENOMEM;

	ist = create_stack(KERNEL_STACK_SIZE);
	if (BUILTIN_EXPECT(!ist, 0)) {
		destroy_stack(stack, DEFAULT_STACK_SIZE);
		return -ENOMEM;
	}

	counter = kmalloc(sizeof(atomic_int64_t));
	if (BUILTIN_EXPECT(!counter, 0)) {
		// release the interrupt stack, not the task stack twice
		destroy_stack(ist, KERNEL_STACK_SIZE);
		destroy_stack(stack, DEFAULT_STACK_SIZE);
		return -ENOMEM;
	}
	atomic_int64_set((atomic_int64_t*) counter, 0);

	spinlock_irqsave_lock(&table_lock);

	for(i=0; i<MAX_TASKS; i++) {
		if (task_table[i].status == TASK_INVALID) {
			task_table[i].id = i;
			task_table[i].status = TASK_READY;
			task_table[i].last_core = core_id;
			task_table[i].last_stack_pointer = NULL;
			task_table[i].stack = stack;
			task_table[i].prio = prio;
			task_table[i].heap = NULL;
			task_table[i].start_tick = get_clock_tick();
			task_table[i].last_tsc = 0;
			task_table[i].parent = 0;
			task_table[i].ist_addr = ist;
			task_table[i].tls_addr = 0;
			task_table[i].tls_size = 0;
			task_table[i].lwip_err = 0;
			task_table[i].signal_handler = NULL;

			if (id)
				*id = i;

			ret = create_default_frame(task_table+i, ep, arg, core_id);
			if (ret)
				goto out;

			// add task to the ready queues
			spinlock_irqsave_lock(&readyqueues[core_id].lock);
			readyqueues[core_id].prio_bitmap |= (1 << prio);
			readyqueues[core_id].nr_tasks++;
			if (!readyqueues[core_id].queue[prio-1].first) {
				task_table[i].next = task_table[i].prev = NULL;
				readyqueues[core_id].queue[prio-1].first = task_table+i;
				readyqueues[core_id].queue[prio-1].last = task_table+i;
			} else {
				task_table[i].prev = readyqueues[core_id].queue[prio-1].last;
				task_table[i].next = NULL;
				readyqueues[core_id].queue[prio-1].last->next = task_table+i;
				readyqueues[core_id].queue[prio-1].last = task_table+i;
			}
			spinlock_irqsave_unlock(&readyqueues[core_id].lock);
			break;
		}
	}

	if (!ret)
		LOG_DEBUG("start new task %d on core %d with stack address %p\n", i, core_id, stack);

out:
	spinlock_irqsave_unlock(&table_lock);

	if (ret) {
		destroy_stack(stack, DEFAULT_STACK_SIZE);
		destroy_stack(ist, KERNEL_STACK_SIZE);
		kfree(counter);
	}

	return ret;
}
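
/** @brief Create a kernel task on a specific core, clamping the priority */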
int create_kernel_task_on_core(tid_t* id, entry_point_t ep, void* args, uint8_t prio, uint32_t core_id)
{
	if (prio > MAX_PRIO)
		prio = NORMAL_PRIO;

	return create_task(id, ep, args, prio, core_id);
}

/** @brief Create a kernel task on the current core, clamping the priority */
int create_kernel_task(tid_t* id, entry_point_t ep, void* args, uint8_t prio)
{
	if (prio > MAX_PRIO)
		prio = NORMAL_PRIO;

	return create_task(id, ep, args, prio, CORE_ID);
}
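
/** @brief Wake up a blocked task and enqueue it on its last core
 *
 * @return 0 on success, -EINVAL if the task was not blocked
 */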
int wakeup_task(tid_t id)
{
	task_t* task;
	uint32_t core_id;
	int ret = -EINVAL;

	spinlock_irqsave_lock(&table_lock);
	task = &task_table[id];
	core_id = task->last_core;

	if (task->status == TASK_BLOCKED) {
		LOG_DEBUG("wakeup task %d on core %d\n", id, core_id);

		task->status = TASK_READY;
		spinlock_irqsave_unlock(&table_lock);

		ret = 0;

		spinlock_irqsave_lock(&readyqueues[core_id].lock);

		// if task is in timer queue, remove it
		if (task->flags & TASK_TIMER) {
			task->flags &= ~TASK_TIMER;

			timer_queue_remove(core_id, task);
		}

		// add task to the ready queue
		readyqueues_push_back(core_id, task);

		// increase the number of ready tasks
		readyqueues[core_id].nr_tasks++;

		// should we wakeup the core?
		if (readyqueues[core_id].nr_tasks == 1)
			wakeup_core(core_id);

		LOG_DEBUG("update nr_tasks on core %d to %d\n", core_id, readyqueues[core_id].nr_tasks);

		spinlock_irqsave_unlock(&readyqueues[core_id].lock);
	} else {
		spinlock_irqsave_unlock(&table_lock);
	}

	return ret;
}
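
/** @brief Block a running task and remove it from its ready queue
 *
 * @return 0 on success, -EINVAL if the task was not running
 */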
int block_task(tid_t id)
{
	task_t* task;
	uint32_t core_id;
	int ret = -EINVAL;
	uint8_t flags;

	flags = irq_nested_disable();

	task = &task_table[id];
	core_id = task->last_core;

	if (task->status == TASK_RUNNING) {
		LOG_DEBUG("block task %d on core %d\n", id, core_id);

		task->status = TASK_BLOCKED;

		spinlock_irqsave_lock(&readyqueues[core_id].lock);

		// remove task from ready queue
		readyqueues_remove(core_id, task);

		// reduce the number of ready tasks
		readyqueues[core_id].nr_tasks--;
		LOG_DEBUG("update nr_tasks on core %d to %d\n", core_id, readyqueues[core_id].nr_tasks);

		spinlock_irqsave_unlock(&readyqueues[core_id].lock);

		ret = 0;
	}

	irq_nested_enable(flags);

	return ret;
}
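
/** @brief Block the currently running task */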
int block_current_task(void)
{
	return block_task(per_core(current_task)->id);
}
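
/** @brief Block the current task until the given clock-tick deadline
 *
 * @return 0 on success, -EINVAL if the task is not running
 */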
int set_timer(uint64_t deadline)
{
	task_t* curr_task;
	uint32_t core_id;
	uint8_t flags;
	int ret = -EINVAL;

	flags = irq_nested_disable();

	curr_task = per_core(current_task);
	core_id = CORE_ID;

	if (curr_task->status == TASK_RUNNING) {
		// blocks task and removes from ready queue
		block_task(curr_task->id);

		curr_task->flags |= TASK_TIMER;
		curr_task->timeout = deadline;

		timer_queue_push(core_id, curr_task);

		ret = 0;
	} else {
		LOG_DEBUG("Task is already blocked. No timer will be set!\n");
	}

	irq_nested_enable(flags);

	return ret;
}
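
/** @brief Wake up all tasks in this core's timer queue whose deadline expired */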
void check_timers(void)
{
	readyqueues_t* readyqueue = &readyqueues[CORE_ID];
	spinlock_irqsave_lock(&readyqueue->lock);

	// since IRQs are disabled, get_clock_tick() won't increase here
	const uint64_t current_tick = get_clock_tick();

	// wake up tasks whose deadline has expired
	task_t* task;
	while ((task = readyqueue->timers.first) && (task->timeout <= current_tick))
	{
		// wakeup_task() pops the task from the timer queue, so the
		// next iteration sees a new first element
		wakeup_task(task->id);
	}

#ifdef DYNAMIC_TICKS
	task = readyqueue->timers.first;
	if (task) {
		update_timer(task);
	}
#endif

	spinlock_irqsave_unlock(&readyqueue->lock);
}
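
/** @brief Pick the next task to run on this core
 *
 * @return a pointer to the stack-pointer slot of the task to switch from,
 *         or NULL if no task switch is required
 */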
size_t** scheduler(void)
{
	task_t* orig_task;
	task_t* curr_task;
	const uint32_t core_id = CORE_ID;
	uint64_t prio;

	orig_task = curr_task = per_core(current_task);
	curr_task->last_core = core_id;

	spinlock_irqsave_lock(&readyqueues[core_id].lock);

	/* a finished task is handed to finish_task_switch() for cleanup */
	if (curr_task->status == TASK_FINISHED)
		readyqueues[core_id].old_task = curr_task;
	else readyqueues[core_id].old_task = NULL; // reset old task

	// did we receive a shutdown IPI? => only the idle task should get the core
	if (BUILTIN_EXPECT(go_down, 0)) {
		if (curr_task->status == TASK_IDLE)
			goto get_task_out;
		curr_task = readyqueues[core_id].idle;
		set_per_core(current_task, curr_task);
	}

	// determine highest priority
	prio = msb(readyqueues[core_id].prio_bitmap);

	const int readyqueue_empty = prio > MAX_PRIO;
	if (readyqueue_empty) {
		if ((curr_task->status == TASK_RUNNING) || (curr_task->status == TASK_IDLE))
			goto get_task_out;
		curr_task = readyqueues[core_id].idle;
		set_per_core(current_task, curr_task);
	} else {
		// does the current task have a higher priority? => no task switch
		if ((curr_task->prio > prio) && (curr_task->status == TASK_RUNNING))
			goto get_task_out;

		// mark current task for later cleanup by finish_task_switch()
		if (curr_task->status == TASK_RUNNING) {
			curr_task->status = TASK_READY;
			readyqueues[core_id].old_task = curr_task;
		}

		// get new task from its ready queue
		curr_task = task_list_pop_front(&readyqueues[core_id].queue[prio-1]);

		if(BUILTIN_EXPECT(curr_task == NULL, 0)) {
			kputs("Kernel panic: No task in readyqueue\n");
			LOG_ERROR("Kernel panic: No task in readyqueue\n");
			while(1);
		}
		if (BUILTIN_EXPECT(curr_task->status == TASK_INVALID, 0)) {
			LOG_ERROR("Kernel panic: Got invalid task %d, orig task %d\n",
				curr_task->id, orig_task->id);
			while(1);
		}

		// if we removed the last task from the queue, update the priority bitmap
		if(readyqueues[core_id].queue[prio-1].first == NULL) {
			readyqueues[core_id].prio_bitmap &= ~(1 << prio);
		}

		// finally make it the new current task
		curr_task->status = TASK_RUNNING;
#ifdef DYNAMIC_TICKS
		curr_task->last_tsc = get_rdtsc();
#endif
		set_per_core(current_task, curr_task);
	}

get_task_out:
	spinlock_irqsave_unlock(&readyqueues[core_id].lock);

	if (curr_task != orig_task) {
		LOG_DEBUG("schedule on core %d from %u to %u with prio %u\n", core_id, orig_task->id, curr_task->id, (uint32_t)curr_task->prio);

		return (size_t**) &(orig_task->last_stack_pointer);
	}

	return NULL;
}
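
/** @brief Look up a task by id
 *
 * @return 0 on success; -ENOMEM, -ENOENT, or -EINVAL on invalid arguments
 */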
int get_task(tid_t id, task_t** task)
{
	if (BUILTIN_EXPECT(task == NULL, 0)) {
		return -ENOMEM;
	}

	if (BUILTIN_EXPECT(id >= MAX_TASKS, 0)) {
		return -ENOENT;
	}

	if (BUILTIN_EXPECT(task_table[id].status == TASK_INVALID, 0)) {
		return -EINVAL;
	}

	*task = &task_table[id];

	return 0;
}