/*
 * Copyright 2010 Stefan Lankes, Chair for Operating Systems,
 *                RWTH Aachen University
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * This file is part of MetalSVM.
 */

#include <metalsvm/stddef.h>
#include <metalsvm/stdio.h>
#include <metalsvm/stdlib.h>
#include <metalsvm/string.h>
#include <metalsvm/tasks.h>
#include <metalsvm/tasks_types.h>
#include <metalsvm/spinlock.h>
#include <metalsvm/mailbox.h>
#include <metalsvm/syscall.h>

task_t* current_task = NULL;
static task_t task_table[MAX_TASKS];
static spinlock_t table_lock = SPINLOCK_INIT;

int multitasking_init(void) {
	memset(task_table, 0x00, sizeof(task_t)*MAX_TASKS);

	task_table[0].status = TASK_RUNNING;
	current_task = task_table+0;
	register_task(current_task);

	return 0;
}

static void wakeup_blocked_tasks(int result) {
	unsigned int i;

	spinlock_lock_irqsave(&table_lock);

	/* wake up blocked tasks */
	for(i=0; i<MAX_TASKS; i++) {
		if (current_task->mbox[i]) {
			mailbox_int32_post(current_task->mbox[i], result);
			current_task->mbox[i] = NULL;
		}
	}

	spinlock_unlock_irqsave(&table_lock);
}

static void NORETURN do_exit(int arg) {
	kprintf("Terminate task: %u, return value %d\n", current_task->id, arg);

	wakeup_blocked_tasks(arg);

	if (current_task->ustack)
		kfree(current_task->ustack, current_task->stack_size);
	if (current_task->mem_usage)
		kprintf("Memory leak! Task %d did not release %d bytes\n", current_task->id, current_task->mem_usage);

	current_task->status = TASK_FINISHED;
	reschedule();

	kputs("Kernel panic: scheduler found no valid task\n");
	while(1) {
		NOP8;
	}
}

void NORETURN leave_kernel_task(void) {
	int result = 0;

	get_return_value(result);
	do_exit(result);
}

void NORETURN leave_user_task(void) {
	int result = 0;

	get_return_value(result);
	SYSCALL1(__NR_exit, result);
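	/* the exit system call must not return; reaching this point indicates a kernel bug */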
	kprintf("Kernel panic! Task %d comes back from syscall \"exit\"\n", current_task->id);
	while(1) ;
}

void NORETURN sys_exit(int arg) {
	do_exit(arg);
}

void NORETURN abort(void) {
	do_exit(-1);
}

static int create_task(tid_t* id, entry_point_t ep, void* arg, size_t stack_size, int user) {
	int ret = -1;
	unsigned int i;

	if (BUILTIN_EXPECT(!ep, 0))
		return -1;
	if (user && !stack_size)
		stack_size = DEFAULT_STACK_SIZE;

	spinlock_lock_irqsave(&table_lock);

	/* search for a free slot in the task table */
	for(i=0; i<MAX_TASKS; i++) {
		if (task_table[i].status == TASK_INVALID) {
			task_table[i].id = i;
			task_table[i].mem_usage = 0;

			if (user) {
				/* user-level tasks get their own stack; it is released again in do_exit() */
				task_table[i].ustack = kmalloc(stack_size);
				if (BUILTIN_EXPECT(!task_table[i].ustack, 0))
					break;
				task_table[i].stack_size = stack_size;
			} else {
				task_table[i].ustack = NULL;
				task_table[i].stack_size = 0;
			}

			/* build the initial stack frame of the new task (architecture-specific) */
			ret = create_default_frame(task_table+i, ep, arg);

			if (id)
				*id = i;

			register_task(task_table+i);
			task_table[i].status = TASK_READY;
			break;
		}
	}

	spinlock_unlock_irqsave(&table_lock);

	return ret;
}

int join_task(tid_t id, int* result) {
	int32_t tmp;
	mailbox_int32_t mbox;

	mailbox_int32_init(&mbox);

	spinlock_lock_irqsave(&table_lock);

	/* the idle task is not allowed to wait for another task */
	if (BUILTIN_EXPECT(current_task->status == TASK_IDLE, 0))
		goto join_out;

	/* a task is not able to wait for itself */
	if (BUILTIN_EXPECT(current_task->id == id, 0))
		goto join_out;

	/* invalid id */
	if (BUILTIN_EXPECT(id >= MAX_TASKS, 0))
		goto join_out;

	/* task does not exist */
	if (BUILTIN_EXPECT(task_table[id].status == TASK_INVALID, 0))
		goto join_out;

	/* task already finished */
	if (BUILTIN_EXPECT(task_table[id].status == TASK_FINISHED, 0))
		goto join_out;

	/* register our mailbox, so do_exit() can post the return value to it */
	task_table[id].mbox[current_task->id] = &mbox;

	spinlock_unlock_irqsave(&table_lock);

	mailbox_int32_fetch(&mbox, &tmp);

	if (result)
		*result = tmp;

	mailbox_int32_destroy(&mbox);

	return 0;

join_out:
	spinlock_unlock_irqsave(&table_lock);
	mailbox_int32_destroy(&mbox);
	return -1;
}

int wakeup_task(tid_t id) {
	task_table[id].status = TASK_READY;

	return 0;
}

void scheduler(void) {
	unsigned int i, new_id;

	spinlock_lock(&table_lock);

	/* signal that this task's slot in the task table can be reused */
	if (current_task->status == TASK_FINISHED)
		current_task->status = TASK_INVALID;

	for(i=1; i <= MAX_TASKS; i++) {
		new_id = (current_task->id + i) % MAX_TASKS;
		if (task_table[new_id].status == TASK_READY) {
			if (current_task->status == TASK_RUNNING)
				current_task->status = TASK_READY;
			task_table[new_id].status = TASK_RUNNING;
			current_task = task_table+new_id;
			goto get_task_out;
		}
	}

	if (current_task->status == TASK_RUNNING)
		goto get_task_out;

	/*
	 * we switch to the idle task (id=0), if the current task terminates
	 * and no other task is ready
	 */
	current_task = task_table+0;

get_task_out:
	spinlock_unlock(&table_lock);
}