reduce the number of function calls to determine the core id

Stefan Lankes 2011-07-19 21:22:54 +02:00
parent 8e9accd6ed
commit dfa928de78
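
The change is mechanical: per_core(current_task) has to determine the id of the calling core before it can return the task pointer, so every repeated use pays for that lookup again. Each affected function now fetches the pointer once into a local task_t* curr_task (named parent_task in sys_fork(), where the current task is the parent of the task being created) and dereferences the local copy instead. Below is a minimal sketch of the pattern under assumed definitions; core_local, get_core_id() and the per_core() macro body are illustrative, not MetalSVM's actual code.

/* Sketch only: assumed shape of the per-core accessor. The real macro
 * and core-id lookup in MetalSVM may look different. */
#include <stdint.h>

#define MAX_CORES    4
#define TASK_READY   1
#define TASK_RUNNING 2

typedef struct task {
	uint32_t id;
	uint32_t status;
} task_t;

typedef struct core_local {
	task_t* current_task;
} core_local_t;

static core_local_t core_local[MAX_CORES];

/* placeholder core-id lookup; a real kernel would typically derive
 * this from the local APIC id, i.e. one function call per use */
static uint32_t get_core_id(void) { return 0; }

#define per_core(name)  (core_local[get_core_id()].name)

void before(void)
{
	/* two separate core-id lookups */
	if (per_core(current_task)->status == TASK_RUNNING)
		per_core(current_task)->status = TASK_READY;
}

void after(void)
{
	/* one lookup, cached in a local pointer and reused */
	task_t* curr_task = per_core(current_task);
	if (curr_task->status == TASK_RUNNING)
		curr_task->status = TASK_READY;
}
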


@@ -102,16 +102,17 @@ size_t get_idle_task(uint32_t id)
*/
static void wakeup_blocked_tasks(int result)
{
-wait_msg_t tmp = { per_core(current_task)->id, result };
+task_t* curr_task = per_core(current_task);
+wait_msg_t tmp = { curr_task->id, result };
unsigned int i;
spinlock_irqsave_lock(&table_lock);
/* wake up blocked tasks */
for(i=0; i<MAX_TASKS; i++) {
-if (per_core(current_task)->outbox[i]) {
-mailbox_wait_msg_post(per_core(current_task)->outbox[i], tmp);
-per_core(current_task)->outbox[i] = NULL;
+if (curr_task->outbox[i]) {
+mailbox_wait_msg_post(curr_task->outbox[i], tmp);
+curr_task->outbox[i] = NULL;
}
}
@@ -122,29 +123,30 @@ static void wakeup_blocked_tasks(int result)
* procedures which are called by exiting tasks. */
static void NORETURN do_exit(int arg) {
vma_t* tmp;
+task_t* curr_task = per_core(current_task);
-kprintf("Terminate task: %u, return value %d\n", per_core(current_task)->id, arg);
+kprintf("Terminate task: %u, return value %d\n", curr_task->id, arg);
wakeup_blocked_tasks(arg);
-//vma_dump(per_core(current_task));
-spinlock_lock(&(per_core(current_task)->vma_lock));
+//vma_dump(curr_task);
+spinlock_lock(&curr_task->vma_lock);
// remove memory regions
-while((tmp = per_core(current_task)->vma_list) != NULL) {
+while((tmp = curr_task->vma_list) != NULL) {
kfree((void*) tmp->start, tmp->end - tmp->start + 1);
-per_core(current_task)->vma_list = tmp->next;
+curr_task->vma_list = tmp->next;
kfree((void*) tmp, sizeof(vma_t));
}
-spinlock_unlock(&(per_core(current_task)->vma_lock));
+spinlock_unlock(&curr_task->vma_lock);
drop_pgd(); // delete page directory and its page tables
-if (atomic_int32_read(&per_core(current_task)->user_usage))
+if (atomic_int32_read(&curr_task->user_usage))
kprintf("Memory leak! Task %d did not release %d pages\n",
-per_core(current_task)->id, atomic_int32_read(&per_core(current_task)->user_usage));
-per_core(current_task)->status = TASK_FINISHED;
+curr_task->id, atomic_int32_read(&curr_task->user_usage));
+curr_task->status = TASK_FINISHED;
reschedule();
kputs("Kernel panic: scheduler found no valid task\n");
@@ -182,6 +184,7 @@ void NORETURN abort(void) {
*/
static int create_task(tid_t* id, entry_point_t ep, void* arg)
{
+task_t* curr_task;
int ret = -ENOMEM;
unsigned int i;
@@ -190,6 +193,8 @@ static int create_task(tid_t* id, entry_point_t ep, void* arg)
spinlock_irqsave_lock(&table_lock);
+curr_task = per_core(current_task);
for(i=0; i<MAX_TASKS; i++) {
if (task_table[i].status == TASK_INVALID) {
atomic_int32_set(&task_table[i].user_usage, 0);
@@ -205,7 +210,7 @@ static int create_task(tid_t* id, entry_point_t ep, void* arg)
task_table[i].vma_list = NULL;
mailbox_wait_msg_init(&task_table[i].inbox);
memset(task_table[i].outbox, 0x00, sizeof(mailbox_wait_msg_t*)*MAX_TASKS);
-task_table[i].outbox[per_core(current_task)->id] = &per_core(current_task)->inbox;
+task_table[i].outbox[curr_task->id] = &curr_task->inbox;
if (id)
*id = i;
@@ -237,7 +242,7 @@ int sys_fork(void)
vma_t* parent;
vma_t* tmp;
-spinlock_lock(&per_core(current_task)->vma_lock);
+spinlock_lock(&parent_task->vma_lock);
spinlock_irqsave_lock(&table_lock);
for(i=0; i<MAX_TASKS; i++) {
@@ -255,7 +260,7 @@ int sys_fork(void)
// copy VMA list
child = &task_table[i].vma_list;
-parent = per_core(current_task)->vma_list;
+parent = parent_task->vma_list;
tmp = NULL;
while(parent) {
@@ -276,9 +281,9 @@ int sys_fork(void)
mailbox_wait_msg_init(&task_table[i].inbox);
memset(task_table[i].outbox, 0x00, sizeof(mailbox_wait_msg_t*)*MAX_TASKS);
-task_table[i].outbox[per_core(current_task)->id] = &per_core(current_task)->inbox;
-task_table[i].flags = per_core(current_task)->flags;
-memcpy(&(task_table[i].fpu), &(per_core(current_task)->fpu), sizeof(union fpu_state));
+task_table[i].outbox[parent_task->id] = &parent_task->inbox;
+task_table[i].flags = parent_task->flags;
+memcpy(&(task_table[i].fpu), &(parent_task->fpu), sizeof(union fpu_state));
task_table[i].start_tick = get_clock_tick();
task_table[i].start_heap = 0;
task_table[i].end_heap = 0;
@@ -303,7 +308,7 @@ int sys_fork(void)
create_task_out:
spinlock_irqsave_unlock(&table_lock);
-spinlock_unlock(&per_core(current_task)->vma_lock);
+spinlock_unlock(&parent_task->vma_lock);
return ret;
}
@@ -341,6 +346,7 @@ static int load_task(load_args_t* largs)
elf_program_header_t prog_header;
//elf_section_header_t sec_header;
vfs_node_t* node;
+task_t* curr_task = per_core(current_task);
if (!largs)
return -EINVAL;
@@ -399,8 +405,8 @@ static int load_task(load_args_t* largs)
memset((void*) prog_header.virt_addr, 0, npages*PAGE_SIZE);
// set starting point of the heap
-if (per_core(current_task)->start_heap < prog_header.virt_addr+prog_header.mem_size)
-per_core(current_task)->start_heap = per_core(current_task)->end_heap = prog_header.virt_addr+prog_header.mem_size;
+if (curr_task->start_heap < prog_header.virt_addr+prog_header.mem_size)
+curr_task->start_heap = curr_task->end_heap = prog_header.virt_addr+prog_header.mem_size;
// load program
read_fs(node, (uint8_t*)prog_header.virt_addr, prog_header.file_size, prog_header.offset);
@@ -412,7 +418,7 @@ static int load_task(load_args_t* largs)
flags |= VMA_WRITE;
if (prog_header.flags & PF_X)
flags |= VMA_EXECUTE;
-vma_add(per_core(current_task), prog_header.virt_addr, prog_header.virt_addr+npages*PAGE_SIZE-1, flags);
+vma_add(curr_task, prog_header.virt_addr, prog_header.virt_addr+npages*PAGE_SIZE-1, flags);
if (!(prog_header.flags & PF_W))
change_page_permissions(prog_header.virt_addr, prog_header.virt_addr+npages*PAGE_SIZE-1, flags);
@@ -441,7 +447,7 @@ static int load_task(load_args_t* largs)
flags |= VMA_WRITE;
if (prog_header.flags & PF_X)
flags |= VMA_EXECUTE;
-vma_add(per_core(current_task), stack, stack+npages*PAGE_SIZE-1, flags);
+vma_add(curr_task, stack, stack+npages*PAGE_SIZE-1, flags);
break;
}
}
@@ -509,7 +515,7 @@ static int load_task(load_args_t* largs)
kfree(largs, sizeof(load_args_t));
// clear fpu state
-per_core(current_task)->flags &= ~(TASK_FPU_USED|TASK_FPU_INIT);
+curr_task->flags &= ~(TASK_FPU_USED|TASK_FPU_INIT);
jump_to_user_code(header.entry, stack+offset);
@@ -587,6 +593,7 @@ int sys_execve(const char* fname, char** argv, char** env)
char *dest, *src;
int ret, argc = 0;
int envc = 0;
+task_t* curr_task = per_core(current_task);
node = findnode_fs((char*) fname);
if (!node || !(node->type == FS_FILE))
@@ -629,16 +636,16 @@ int sys_execve(const char* fname, char** argv, char** env)
while ((*dest++ = *src++) != 0);
}
-spinlock_lock(&(per_core(current_task)->vma_lock));
+spinlock_lock(&curr_task->vma_lock);
// remove old program
-while((tmp = per_core(current_task)->vma_list) != NULL) {
+while((tmp = curr_task->vma_list) != NULL) {
kfree((void*) tmp->start, tmp->end - tmp->start + 1);
-per_core(current_task)->vma_list = tmp->next;
+curr_task->vma_list = tmp->next;
kfree((void*) tmp, sizeof(vma_t));
}
-spinlock_unlock(&(per_core(current_task)->vma_lock));
+spinlock_unlock(&curr_task->vma_lock);
/*
* we use a trap gate to enter the kernel
@@ -658,16 +665,17 @@ int sys_execve(const char* fname, char** argv, char** env)
* return value. */
tid_t wait(int32_t* result)
{
+task_t* curr_task = per_core(current_task);
wait_msg_t tmp = { -1, -1};
/*
* idle tasks are not allowed to wait for another task
* they should always run...
*/
-if (BUILTIN_EXPECT(per_core(current_task)->status == TASK_IDLE, 0))
+if (BUILTIN_EXPECT(curr_task->status == TASK_IDLE, 0))
return -EINVAL;
-mailbox_wait_msg_fetch(&per_core(current_task)->inbox, &tmp);
+mailbox_wait_msg_fetch(&curr_task->inbox, &tmp);
if (result)
*result = tmp.result;
@@ -728,6 +736,7 @@ int block_task(tid_t id)
*/
void scheduler(void)
{
+task_t* curr_task;
unsigned int i;
unsigned int new_id;
@@ -735,22 +744,24 @@ void scheduler(void)
spinlock_irqsave_lock(&table_lock);
#endif
+curr_task = per_core(current_task);
/* signalize that this task could be reused */
-if (per_core(current_task)->status == TASK_FINISHED)
-per_core(current_task)->status = TASK_INVALID;
+if (curr_task->status == TASK_FINISHED)
+curr_task->status = TASK_INVALID;
/* if the task is using the FPU, we need to save the FPU context */
-if (per_core(current_task)->flags & TASK_FPU_USED) {
-save_fpu_state(&(per_core(current_task)->fpu));
-per_core(current_task)->flags &= ~TASK_FPU_USED;
+if (curr_task->flags & TASK_FPU_USED) {
+save_fpu_state(&(curr_task->fpu));
+curr_task->flags &= ~TASK_FPU_USED;
}
-for(i=1, new_id=(per_core(current_task)->id + 1) % MAX_TASKS;
+for(i=1, new_id=(curr_task->id + 1) % MAX_TASKS;
i<MAX_TASKS; i++, new_id=(new_id+1) % MAX_TASKS)
{
if (task_table[new_id].status == TASK_READY) {
-if (per_core(current_task)->status == TASK_RUNNING)
-per_core(current_task)->status = TASK_READY;
+if (curr_task->status == TASK_RUNNING)
+curr_task->status = TASK_READY;
task_table[new_id].status = TASK_RUNNING;
per_core(current_task) = task_table+new_id;
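
One detail in the scheduler() hunk above: reads and writes of the task's own fields go through the cached curr_task, but the assignment that switches the core's current task still goes through per_core(current_task), because storing into the local pointer would not update the per-core variable. The relevant lines, quoted with comments added:

if (curr_task->status == TASK_RUNNING)      /* task state: the cached pointer is enough */
	curr_task->status = TASK_READY;
task_table[new_id].status = TASK_RUNNING;
per_core(current_task) = task_table+new_id; /* per-core variable: must be assigned via per_core() */
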
@@ -758,7 +769,7 @@ void scheduler(void)
}
}
-if ((per_core(current_task)->status == TASK_RUNNING) || (per_core(current_task)->status == TASK_IDLE))
+if ((curr_task->status == TASK_RUNNING) || (curr_task->status == TASK_IDLE))
goto get_task_out;
/*