/* 
 * Copyright 2010 Stefan Lankes, Chair for Operating Systems,
 *                               RWTH Aachen University
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * This file is part of MetalSVM. 
 */

#include <metalsvm/stdio.h>
#include <metalsvm/stdlib.h>
#include <metalsvm/string.h>
#include <metalsvm/errno.h>
#include <metalsvm/mmu.h>
#include <metalsvm/page.h>
#include <metalsvm/tasks.h>
#include <metalsvm/processor.h>
#include <metalsvm/spinlock.h>
#include <metalsvm/mailbox.h>
#include <metalsvm/syscall.h>

/* task currently running on this core */
DEFINE_PER_CORE(task_t*, current_task, NULL);
/* static table of all task descriptors */
static task_t task_table[MAX_TASKS] = {[0 ... MAX_TASKS-1] = {0, TASK_INVALID, NULL, 0, ATOMIC_INIT(0), NULL, NULL}};
/* lock protecting the task table */
static spinlock_t table_lock = SPINLOCK_INIT;

/*
 * helper function for the assembly code to determine the current task
 */
task_t* get_current_task(void)
{
	return per_core(current_task);
}

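/*
 * registers the booting kernel context as the first task in the
 * task table and attaches the kernel page directory to it
 */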
int multitasking_init(void) {
	unsigned int i;

	for(i=0; i<MAX_TASKS; i++) {
		if (task_table[i].status == TASK_INVALID) {
			task_table[i].id = i;
			task_table[i].status = TASK_RUNNING;
			memset(task_table[i].mbox, 0x00, sizeof(mailbox_int32_t*)*MAX_TASKS);
			per_core(current_task) = task_table+i;
			get_kernel_pgd(per_core(current_task));
			return 0;
		}
	}

	return -ENOMEM;
}

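/*
 * posts the exit code to every task that is waiting (via join_task)
 * for the termination of the current task
 */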
static void wakeup_blocked_tasks(int result)
{
	unsigned int i;

	spinlock_lock_irqsave(&table_lock);

	/* wake up blocked tasks */
	for(i=0; i<MAX_TASKS; i++) {
		if (per_core(current_task)->mbox[i]) {
			mailbox_int32_post(per_core(current_task)->mbox[i], result);
			per_core(current_task)->mbox[i] = NULL;
		}
	}

	spinlock_unlock_irqsave(&table_lock);
}

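/*
 * releases the resources of the current task, wakes up all tasks
 * waiting for its termination and hands the CPU over to the scheduler
 */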
static void NORETURN do_exit(int arg) {
	kprintf("Terminate task: %u, return value %d\n", per_core(current_task)->id, arg);

	wakeup_blocked_tasks(arg);
	if (per_core(current_task)->ustack) 
		kfree(per_core(current_task)->ustack, per_core(current_task)->stack_size);
	if (atomic_int32_read(&per_core(current_task)->mem_usage))
		kprintf("Memory leak! Task %d did not release %d bytes\n", per_core(current_task)->id, atomic_int32_read(&per_core(current_task)->mem_usage));
	per_core(current_task)->status = TASK_FINISHED;
	reschedule();
	
	kputs("Kernel panic: scheduler found no valid task\n");
	while(1) {
		NOP8;
	}
}

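/*
 * a kernel task ends up here when it returns from its entry point;
 * the return value is collected and the task is terminated
 */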
void NORETURN leave_kernel_task(void) {
	int result;

	result = get_return_value();
	do_exit(result);
}

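/*
 * a user task ends up here when it returns from its entry point;
 * the return value is passed to the kernel via the exit syscall
 */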
void NORETURN leave_user_task(void) {
	int result;

	result = get_return_value();
	SYSCALL1(__NR_exit, result);

	kprintf("Kernel panic! Task %d comes back from syscall \"exit\"\n", per_core(current_task)->id);

	while(1) {
		NOP8;
	}
}

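/* syscall handler to terminate the calling task */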
void NORETURN sys_exit(int arg) 
{
	do_exit(arg);
}

void NORETURN abort(void) {
	do_exit(-1);
}

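/*
 * searches for a free slot in the task table and initializes it;
 * user tasks get their own stack of stack_size bytes, kernel tasks
 * run on the kernel's page directory
 */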
static int create_task(tid_t* id, entry_point_t ep, void* arg, size_t stack_size, int user)
{
	int ret = -ENOMEM;
	unsigned int i;

	if (BUILTIN_EXPECT(!ep, 0))
		return -EINVAL;

	if (user && !stack_size)
		stack_size = DEFAULT_STACK_SIZE;

	spinlock_lock_irqsave(&table_lock);

	for(i=0; i<MAX_TASKS; i++) {
		if (task_table[i].status == TASK_INVALID) {
			task_table[i].id = i;
			task_table[i].status = TASK_READY;

			if (user) {
				task_table[i].ustack = create_stack(task_table+i, stack_size);
				if (!task_table[i].ustack)
					break;
				task_table[i].stack_size = stack_size;
				task_table[i].pgd = NULL;
				task_table[i].pgd_lock = NULL;
			} else {
				task_table[i].ustack = NULL;
				task_table[i].stack_size = 0;
				get_kernel_pgd(task_table+i);
			}

			atomic_int32_set(&task_table[i].mem_usage, 0);		
			memset(task_table[i].mbox, 0x00, sizeof(mailbox_int32_t*)*MAX_TASKS);
		
			if (id)
				*id = i;	

			ret = create_default_frame(task_table+i, ep, arg, user);
			break;
		}
	}

	spinlock_unlock_irqsave(&table_lock);

	return ret;
}

int create_kernel_task(tid_t* id, entry_point_t ep, void* arg)
{
	return create_task(id, ep, arg, 0, 0);
}

int create_user_task(tid_t* id, entry_point_t ep, void* arg, size_t stack_size)
{
	return create_task(id, ep, arg, stack_size, 1);
}

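/*
 * blocks the calling task until the task with the given id has finished;
 * if result is not NULL, the exit code of that task is stored there
 */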
int join_task(tid_t id, int* result) 
{
	int32_t tmp;
	mailbox_int32_t	mbox;

	mailbox_int32_init(&mbox);

	spinlock_lock_irqsave(&table_lock);

	/* 
	 * idle tasks are not allowed to wait for another task 
	 * they should always run...
	 */
	if (BUILTIN_EXPECT(per_core(current_task)->status == TASK_IDLE, 0))
		goto join_out;

	/* a task is not able to wait for itself */
	if (BUILTIN_EXPECT(per_core(current_task)->id == id, 0))
		goto join_out;

	/* invalid id */
	if (BUILTIN_EXPECT(id >= MAX_TASKS, 0))
		goto join_out;

	/* invalid task, nothing to wait for */
	if (BUILTIN_EXPECT(task_table[id].status == TASK_INVALID, 0))
		goto join_out;

	/* task already finished */
	if (BUILTIN_EXPECT(task_table[id].status == TASK_FINISHED, 0))
		goto join_out;

	task_table[id].mbox[per_core(current_task)->id] = &mbox;

	spinlock_unlock_irqsave(&table_lock);
	
	mailbox_int32_fetch(&mbox, &tmp);

	if (result)
		*result = tmp;

	mailbox_int32_destroy(&mbox);

	return 0;

join_out: 
	spinlock_unlock_irqsave(&table_lock);
	mailbox_int32_destroy(&mbox);
	return -EINVAL;
}

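/*
 * marks a blocked task as ready so that the scheduler
 * may select it again
 */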
int wakeup_task(tid_t id)
{
	int ret = -EINVAL;
	int need_lock = !spinlock_has_lock(&table_lock);

	/* avoid nested locking */
	if (need_lock)
		spinlock_lock_irqsave(&table_lock);

	if (task_table[id].status != TASK_BLOCKED) {
		kprintf("Task %d is not blocked!\n", id);
	} else {
		task_table[id].status = TASK_READY;
		ret = 0;
	}

	if (need_lock)
		spinlock_unlock_irqsave(&table_lock);

	return ret;
}

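/*
 * marks a running or ready task as blocked; the scheduler skips it
 * until wakeup_task is called
 */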
int block_task(tid_t id)
{
	int ret = -EINVAL;
	int need_lock = !spinlock_has_lock(&table_lock);

	/* avoid nested locking */
	if (need_lock)
		spinlock_lock_irqsave(&table_lock);

	if ((task_table[id].status == TASK_RUNNING) || (task_table[id].status == TASK_READY)) {
		task_table[id].status = TASK_BLOCKED;
		ret = 0;
	} else kprintf("Unable to block task %d!\n", id);

	if (need_lock)
		spinlock_unlock_irqsave(&table_lock);

	return ret;
}

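/*
 * determines the next task to run: scans the task table round-robin,
 * starting after the current task, and falls back to the idle task
 * if no other task is ready
 */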
void scheduler(void) 
{
	unsigned int i;
	unsigned int new_id;

	spinlock_lock(&table_lock);

	/* signal that this task's slot can be reused */
	if (per_core(current_task)->status == TASK_FINISHED)
		per_core(current_task)->status = TASK_INVALID;

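	/* round-robin: search for the next ready task, starting after the current one */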
	for(i=1, new_id=(per_core(current_task)->id + 1) % MAX_TASKS; 
		i<MAX_TASKS; i++, new_id=(new_id+1) % MAX_TASKS) 
	{
		if (task_table[new_id].status == TASK_READY) {
			if (per_core(current_task)->status == TASK_RUNNING)
				per_core(current_task)->status = TASK_READY;
			task_table[new_id].status = TASK_RUNNING;
	
			per_core(current_task) = task_table+new_id;
			goto get_task_out;
		}
	}

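	/* no ready task found => keep the current task if it is still runnable */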
	if ((per_core(current_task)->status == TASK_RUNNING) || (per_core(current_task)->status == TASK_IDLE))
		goto get_task_out;

	/*
	 * switch to the idle task if the current task has terminated
	 * and no other task is ready
	 */
	for(i=0; i<MAX_TASKS; i++) {
		if (task_table[i].status == TASK_IDLE) {
			per_core(current_task) = task_table+i;
			goto get_task_out;
		}
	}

get_task_out:
	spinlock_unlock(&table_lock);
}