/*
 * metalsvm/kernel/tasks.c
 * snapshot: 2010-08-04 17:20:44 +00:00 (240 lines, 5.5 KiB, C)
 */
/*
* Copyright 2010 Stefan Lankes, Chair for Operating Systems,
* RWTH Aachen University
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* This file is part of MetalSVM.
*/
#include <metalsvm/stdio.h>
#include <metalsvm/stdlib.h>
#include <metalsvm/string.h>
#include <metalsvm/mmu.h>
#include <metalsvm/tasks.h>
#include <metalsvm/processor.h>
#include <metalsvm/spinlocks.h>
/* task control block of the task currently running on the CPU */
task_t* current_task = NULL;
/* static pool of all task control blocks; slot 0 is the boot/idle task */
static task_t task_table[MAX_TASKS];
/* protects task_table (status fields, wait lists) against concurrent access */
static spinlock_t table_lock = SPINLOCK_INIT;
int multitasking_init(void) {
memset(task_table, 0, sizeof(task_t)*MAX_TASKS);
task_table[0].id = 0;
task_table[0].mm.usage = 0;
task_table[0].stack = NULL;
task_table[0].stack_size = 8192;
task_table[0].status = TASK_RUNNING;
current_task = task_table;
return 0;
}
/*
 * Hand the terminating task's result to every task that blocked on it
 * and move those tasks back to the ready state; afterwards the current
 * task's wait list is fully cleared.
 *
 * @param result return value delivered to each woken task
 */
static void wakeup_blocked_tasks(void* result)
{
	unsigned int idx;

	spinlock_lock_irqsave(&table_lock);

	for(idx=0; idx<MAX_TASKS; idx++) {
		if ((task_table[idx].status == TASK_BLOCKED) && current_task->blocked_tasks[idx]) {
			task_table[idx].return_value = result;
			task_table[idx].status = TASK_READY;
		}
		/* reset the wait-list entry unconditionally */
		current_task->blocked_tasks[idx] = 0;
	}

	spinlock_unlock_irqsave(&table_lock);
}
/*
 * Regular exit path of a kernel task: collect the task's return value,
 * wake all tasks joined on this one, mark the task finished and give
 * the CPU to the scheduler. Must never return to the caller.
 */
void NORETURN leave_task(void) {
void* result = NULL;
/* NOTE(review): result is a plain local; get_return_value() is
 * presumably a macro writing into it — confirm, otherwise result
 * would remain NULL here */
get_return_value(result);
kprintf("Terminate task: %u, return value = %p\n", current_task->id, result);
/* deliver result to waiters before the slot can be recycled */
wakeup_blocked_tasks(result);
current_task->status = TASK_FINISHED;
schedule();
/* schedule() must not come back once this task is TASK_FINISHED */
kputs("Kernel panic: scheduler found no valid task\n");
while(1) {
NOP8;
}
}
/*
 * Abnormal exit path of a kernel task: like leave_task(), but the
 * tasks joined on this one receive -1 as the return value.
 */
void NORETURN abort(void) {
	void* retval = (void*) -1;

	kprintf("Abort task: %u\n", current_task->id);

	/* hand -1 to all waiters, then retire this task */
	wakeup_blocked_tasks(retval);
	current_task->status = TASK_FINISHED;
	schedule();

	/* schedule() must never return here */
	kputs("Kernel panic: scheduler found no valid task\n");
	for(;;) {
		NOP8;
	}
}
/*
 * Create a new kernel-space task in the first free slot of the task
 * table.
 *
 * @param id         out parameter receiving the new task id (may be NULL)
 * @param ep         entry point of the new task; must not be NULL
 * @param arg        argument passed to the entry point
 * @param stack_size requested stack size in bytes; 0 selects
 *                   DEFAULT_STACK_SIZE
 *
 * @return 0 on success, -1 on error (invalid entry point, no free
 *         slot, or stack allocation failure)
 */
int create_kernel_task(tid_t* id, entry_point_t ep, void* arg, size_t stack_size)
{
	int ret = -1;
	unsigned int i;

	if (BUILTIN_EXPECT(!ep, 0))
		return -1;
	if (!stack_size)
		stack_size = DEFAULT_STACK_SIZE;

	spinlock_lock_irqsave(&table_lock);

	for(i=0; i<MAX_TASKS; i++) {
		if (task_table[i].status == TASK_INVALID) {
			/* recycle the slot: release the previous task's stack */
			if (task_table[i].stack)
				kfree(task_table[i].stack, task_table[i].stack_size);
			if (task_table[i].mm.usage) {
				/* message typo fixed: "leax" -> "leak" */
				kprintf("Task %d has a memory leak (%d byte)\n", task_table[i].id, task_table[i].mm.usage);
				task_table[i].mm.usage = 0;
			}
			task_table[i].stack = create_stack(task_table+i, stack_size);
			if (!task_table[i].stack)
				break;	/* out of memory => ret stays -1 */
			task_table[i].stack_size = stack_size;
			/* initial stack top: one word below the end of the stack */
			task_table[i].top = task_table[i].stack + stack_size - sizeof(size_t);
			task_table[i].ip = 0;
			task_table[i].id = i;
			/* fresh task blocks nobody yet */
			memset(task_table[i].blocked_tasks, 0x00, sizeof(unsigned char)*MAX_TASKS);
			task_table[i].return_value = NULL;
			task_table[i].status = TASK_READY;
			if (id)
				*id = i;
			ret = create_default_frame(task_table+i, ep, arg);
			break;
		}
	}

	spinlock_unlock_irqsave(&table_lock);

	return ret;
}
/*
 * Block the current task until task `id` terminates and optionally
 * fetch its return value.
 *
 * @param id     task to wait for
 * @param result out parameter for the joined task's return value
 *               (may be NULL)
 *
 * @return 0 on success, -1 if the wait is not permitted (idle task,
 *         self-join, invalid id, or target already gone/finished)
 */
int join_kernel_task(tid_t id, void** result)
{
spinlock_lock_irqsave(&table_lock);
/*
 * idle tasks are not allowed to wait for another task
 * they should always run...
 */
if (BUILTIN_EXPECT(current_task->status == TASK_IDLE, 0))
goto join_out;
/* a task is not able to wait for itself */
if (BUILTIN_EXPECT(current_task->id == id, 0))
goto join_out;
/* invalid id */
if (BUILTIN_EXPECT(id >= MAX_TASKS, 0))
goto join_out;
/* task already finished */
if (BUILTIN_EXPECT(task_table[id].status == TASK_INVALID, 0))
goto join_out;
/* task already finished */
if (BUILTIN_EXPECT(task_table[id].status == TASK_FINISHED, 0))
goto join_out;
/* register on the target's wait list and block — both must happen
 * under the table lock, before the scheduler may run */
task_table[id].blocked_tasks[current_task->id] = 1;
current_task->status = TASK_BLOCKED;
spinlock_unlock_irqsave(&table_lock);
schedule();
/* we resume here only after wakeup_blocked_tasks() stored the
 * target's return value into our task control block */
if (result) {
*result = current_task->return_value;
current_task->return_value = NULL;
}
return 0;
join_out:
spinlock_unlock_irqsave(&table_lock);
return -1;
}
/*
 * Move a blocked task back to the ready state.
 *
 * @param id task to wake up
 *
 * @return 0 on success, -1 on error (id out of range or task not
 *         blocked)
 */
int wakeup_task(tid_t id)
{
	int ret = -1;

	/* fix: reject out-of-range ids before indexing task_table
	 * (join_kernel_task performs the same check) */
	if (BUILTIN_EXPECT(id >= MAX_TASKS, 0))
		return -1;

	spinlock_lock_irqsave(&table_lock);

	if (task_table[id].status != TASK_BLOCKED) {
		kprintf("Task %u is already unblocked\n", id);
		goto wakeup_out;
	}

	task_table[id].status = TASK_READY;
	ret = 0;

wakeup_out:
	spinlock_unlock_irqsave(&table_lock);
	return ret;
}
task_t* get_new_task(void)
{
task_t* ret;
unsigned int i, new_id;
spinlock_lock(&table_lock);
/* signalize that this task could be reuse */
if (current_task->status == TASK_FINISHED)
current_task->status = TASK_INVALID;
for(i=1; i <= MAX_TASKS; i++) {
new_id = (current_task->id + i) % MAX_TASKS;
if (task_table[new_id].status == TASK_READY) {
if (current_task->status == TASK_RUNNING)
current_task->status = TASK_READY;
task_table[new_id].status = TASK_RUNNING;
ret = task_table+new_id;
goto get_task_out;
}
}
if (current_task->status == TASK_RUNNING) {
ret = current_task;
goto get_task_out;
}
/*
* we switch to the idle task (id=0), if the current task terminates
* and no other is ready
*/
ret = task_table+0;
get_task_out:
spinlock_unlock(&table_lock);
return ret;
}