diff --git a/arch/x86/kernel/entry.asm b/arch/x86/kernel/entry.asm
index b22ee610..58ce63ad 100644
--- a/arch/x86/kernel/entry.asm
+++ b/arch/x86/kernel/entry.asm
@@ -472,7 +472,7 @@ global irq14
 global irq15

 extern irq_handler
-extern current_task
+extern get_current_task
 extern scheduler

 global reschedule
@@ -482,10 +482,11 @@ reschedule:

    ; => we have not to save the original eax value
    push ebx
-   push DWORD [current_task]
+   call get_current_task
+   push eax
    call scheduler
+   call get_current_task
    pop ebx
-   mov eax, DWORD [current_task]
    cmp eax, ebx
    je no_task_switch1
@@ -515,11 +516,11 @@ irq0:
    call irq_handler
    add esp, 4

-   mov eax, DWORD [current_task]
+   call get_current_task
    push eax
    call scheduler
+   call get_current_task
    pop ebx
-   mov eax, DWORD [current_task]
    cmp eax, ebx
    je no_task_switch2

diff --git a/include/metalsvm/semaphore.h b/include/metalsvm/semaphore.h
index 57af6473..cc2497e6 100644
--- a/include/metalsvm/semaphore.h
+++ b/include/metalsvm/semaphore.h
@@ -71,9 +71,9 @@ next_try:
 		s->value--;
 		spinlock_unlock(&s->lock);
 	} else {
-		s->queue[s->pos] = current_task->id;
+		s->queue[s->pos] = per_core(current_task)->id;
 		s->pos = (s->pos + 1) % MAX_TASKS;
-		block_task(current_task->id);
+		block_task(per_core(current_task)->id);
 		spinlock_unlock(&s->lock);
 		reschedule();
 		goto next_try;
diff --git a/include/metalsvm/spinlock.h b/include/metalsvm/spinlock.h
index 50648c4a..fe4dc725 100644
--- a/include/metalsvm/spinlock.h
+++ b/include/metalsvm/spinlock.h
@@ -61,7 +61,7 @@ inline static int spinlock_lock(spinlock_t* s) {
 	ticket = atomic_int32_inc(&s->queue);
 	while(atomic_int32_read(&s->dequeue) != ticket)
 		reschedule();
-	s->owner = current_task->id;
+	s->owner = per_core(current_task)->id;

 	return 0;
 }
@@ -97,7 +97,7 @@ inline static int spinlock_unlock_irqsave(spinlock_t* s) {
 }

 inline static int spinlock_has_lock(spinlock_t* s) {
-	return (s->owner == current_task->id);
+	return (s->owner == per_core(current_task)->id);
 }

 #ifdef __cplusplus
diff --git a/include/metalsvm/stddef.h b/include/metalsvm/stddef.h
index 01ec7a3e..6fae8abb 100644
--- a/include/metalsvm/stddef.h
+++ b/include/metalsvm/stddef.h
@@ -29,6 +29,19 @@ extern "C" {

 #define NULL ((void*) 0)

+#if MAX_CORES == 1
+#define per_core(name) name
+#define DECLARE_PER_CORE(type, name) extern type name;
+#define DEFINE_PER_CORE(type, name, def_value) type name = def_value;
+#else
+#define per_core(name) name[0].var
+#define DECLARE_PER_CORE(type, name) \
+	typedef struct { type var __attribute__ ((aligned (CACHE_LINE))); } aligned_##name;\
+	extern aligned_##name name[MAX_CORES];
+#define DEFINE_PER_CORE(type, name, def_value) \
+	aligned_##name name[MAX_CORES] = {[0 ... MAX_CORES-1] = {def_value}};
+#endif
+
 #ifdef __cplusplus
 }
 #endif
diff --git a/include/metalsvm/tasks.h b/include/metalsvm/tasks.h
index 8694f2c7..9dd16714 100644
--- a/include/metalsvm/tasks.h
+++ b/include/metalsvm/tasks.h
@@ -29,8 +29,8 @@
 extern "C" {
 #endif

-/* task, which is currently running */
-extern task_t* current_task;
+/* tasks, which are currently running */
+DECLARE_PER_CORE(task_t*, current_task);

 int multitasking_init(void);
diff --git a/kernel/main.c b/kernel/main.c
index 20b45b11..292a981d 100644
--- a/kernel/main.c
+++ b/kernel/main.c
@@ -171,7 +171,7 @@ int main(void)
 	create_kernel_task(&pc_id, scc_pc_task, NULL);
 #endif

-	current_task->status = TASK_IDLE;
+	per_core(current_task)->status = TASK_IDLE;
 	reschedule();

 	while(1) {
diff --git a/kernel/syscall.c b/kernel/syscall.c
index 3e1c72e5..4997f0d6 100644
--- a/kernel/syscall.c
+++ b/kernel/syscall.c
@@ -64,7 +64,7 @@ int syscall_handler(uint32_t sys_nr, ...)
 		ret = 0;
 		break;
 	case __NR_getpid:
-		ret = current_task->id;
+		ret = per_core(current_task)->id;
 		break;
 	case __NR_fstat:
 	default:
diff --git a/kernel/tasks.c b/kernel/tasks.c
index 5b081a47..e3f26b64 100644
--- a/kernel/tasks.c
+++ b/kernel/tasks.c
@@ -28,10 +28,18 @@
 #include
 #include

-task_t* current_task = NULL;
+DEFINE_PER_CORE(task_t*, current_task, NULL);
 static task_t task_table[MAX_TASKS] = {[0 ... MAX_TASKS-1] = {0, 0, NULL, 0, ATOMIC_INIT(0)}};
 static spinlock_t table_lock = SPINLOCK_INIT;

+/*
+ * helper function for the assembly code to determine the current task
+ */
+task_t* get_current_task(void)
+{
+	return per_core(current_task);
+}
+
 int multitasking_init(void) {
 	unsigned int i;

@@ -40,8 +48,8 @@ int multitasking_init(void) {
 		task_table[i].id = i;
 		task_table[i].status = TASK_RUNNING;
 		memset(task_table[i].mbox, 0x00, sizeof(mailbox_int32_t*)*MAX_TASKS);
-		current_task = task_table+i;
-		register_task(current_task);
+		per_core(current_task) = task_table+i;
+		register_task(per_core(current_task));
 		return 0;
 	}
 }
@@ -57,9 +65,9 @@ static void wakeup_blocked_tasks(int result)

 	/* wake up blocked tasks */
 	for(i=0; i<MAX_TASKS; i++) {
-		if (current_task->mbox[i]) {
-			mailbox_int32_post(current_task->mbox[i], result);
-			current_task->mbox[i] = NULL;
+		if (per_core(current_task)->mbox[i]) {
+			mailbox_int32_post(per_core(current_task)->mbox[i], result);
+			per_core(current_task)->mbox[i] = NULL;
 		}
 	}
@@ -67,14 +75,14 @@ static void NORETURN do_exit(int arg) {
-	kprintf("Terminate task: %u, return value %d\n", current_task->id, arg);
+	kprintf("Terminate task: %u, return value %d\n", per_core(current_task)->id, arg);

 	wakeup_blocked_tasks(arg);
-	if (current_task->ustack)
-		kfree(current_task->ustack, current_task->stack_size);
-	if (atomic_int32_read(&current_task->mem_usage))
-		kprintf("Memory leak! Task %d did not release %d bytes\n", current_task->id, atomic_int32_read(&current_task->mem_usage));
-	current_task->status = TASK_FINISHED;
+	if (per_core(current_task)->ustack)
+		kfree(per_core(current_task)->ustack, per_core(current_task)->stack_size);
+	if (atomic_int32_read(&per_core(current_task)->mem_usage))
+		kprintf("Memory leak! Task %d did not release %d bytes\n", per_core(current_task)->id, atomic_int32_read(&per_core(current_task)->mem_usage));
+	per_core(current_task)->status = TASK_FINISHED;
 	reschedule();

 	kputs("Kernel panic: scheduler found no valid task\n");
@@ -96,7 +104,7 @@ void NORETURN leave_user_task(void) {
 	result = get_return_value();
 	SYSCALL1(__NR_exit, result);

-	kprintf("Kernel panic! Task %d comes back from syscall \"exit\"\n", current_task->id);
+	kprintf("Kernel panic! Task %d comes back from syscall \"exit\"\n", per_core(current_task)->id);

 	while(1) {
 		NOP8;
@@ -179,11 +187,11 @@ int join_task(tid_t id, int* result)
 	 * idle tasks are not allowed to wait for another task
 	 * they should always run...
 	 */
-	if (BUILTIN_EXPECT(current_task->status == TASK_IDLE, 0))
+	if (BUILTIN_EXPECT(per_core(current_task)->status == TASK_IDLE, 0))
 		goto join_out;

 	/* a task is not able to wait for itself */
-	if (BUILTIN_EXPECT(current_task->id == id, 0))
+	if (BUILTIN_EXPECT(per_core(current_task)->id == id, 0))
 		goto join_out;

 	/* invalid id */
@@ -198,7 +206,7 @@ int join_task(tid_t id, int* result)
 	if (BUILTIN_EXPECT(task_table[id].status == TASK_FINISHED, 0))
 		goto join_out;

-	task_table[id].mbox[current_task->id] = &mbox;
+	task_table[id].mbox[per_core(current_task)->id] = &mbox;

 	spinlock_unlock_irqsave(&table_lock);
@@ -267,23 +275,23 @@ void scheduler(void)
 	spinlock_lock(&table_lock);

 	/* signalize that this task could be reuse */
-	if (current_task->status == TASK_FINISHED)
-		current_task->status = TASK_INVALID;
+	if (per_core(current_task)->status == TASK_FINISHED)
+		per_core(current_task)->status = TASK_INVALID;

 	for(i=1; i<MAX_TASKS; i++) {
-		new_id = (current_task->id + i) % MAX_TASKS;
+		new_id = (per_core(current_task)->id + i) % MAX_TASKS;

 		if (task_table[new_id].status == TASK_READY) {
-			if (current_task->status == TASK_RUNNING)
-				current_task->status = TASK_READY;
+			if (per_core(current_task)->status == TASK_RUNNING)
+				per_core(current_task)->status = TASK_READY;
 			task_table[new_id].status = TASK_RUNNING;
-			current_task = task_table+new_id;
+			per_core(current_task) = task_table+new_id;

 			goto get_task_out;
 		}
 	}

-	if ((current_task->status == TASK_RUNNING) || (current_task->status == TASK_IDLE))
+	if ((per_core(current_task)->status == TASK_RUNNING) || (per_core(current_task)->status == TASK_IDLE))
 		goto get_task_out;

 	/*
@@ -292,7 +300,7 @@ void scheduler(void)
 	 */
 	for(i=0; i
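
Note on the new per_core() machinery in include/metalsvm/stddef.h: for MAX_CORES > 1, every per-core variable becomes an array with one cache-line-aligned slot per core, so two cores never share a cache line for their private data (no false sharing), while the uniprocessor build collapses the macros to a plain global. per_core() still hard-codes slot 0, i.e. the patch prepares the data layout before any real core indexing exists. Below is a minimal, host-compilable sketch of how the macros expand; the CACHE_LINE value of 64, the stub task_t, and main() are assumptions made for illustration and are not part of the patch:

#include <stdio.h>

#define MAX_CORES  4
#define CACHE_LINE 64	/* assumption: typical x86 cache-line size */

#if MAX_CORES == 1
#define per_core(name) name
#define DECLARE_PER_CORE(type, name) extern type name;
#define DEFINE_PER_CORE(type, name, def_value) type name = def_value;
#else
/* one cache-line-aligned slot per core, so cores never share a line */
#define per_core(name) name[0].var	/* always slot 0 for now; no smp_id() yet */
#define DECLARE_PER_CORE(type, name) \
	typedef struct { type var __attribute__ ((aligned (CACHE_LINE))); } aligned_##name; \
	extern aligned_##name name[MAX_CORES];
#define DEFINE_PER_CORE(type, name, def_value) \
	aligned_##name name[MAX_CORES] = {[0 ... MAX_CORES-1] = {def_value}};
#endif

typedef struct { int id; } task_t;	/* stub, stands in for the kernel's task_t */

DECLARE_PER_CORE(task_t*, current_task);	/* what tasks.h now does */
DEFINE_PER_CORE(task_t*, current_task, NULL);	/* what tasks.c now does */

/* same shape as the helper that entry.asm reaches via "call get_current_task" */
task_t* get_current_task(void)
{
	return per_core(current_task);
}

int main(void)
{
	static task_t idle = { .id = 0 };

	per_core(current_task) = &idle;	/* expands to: current_task[0].var = &idle */
	printf("current task id: %d\n", get_current_task()->id);
	printf("bytes per slot:  %zu\n", sizeof(current_task[0]));	/* padded to CACHE_LINE */
	return 0;
}

Because get_current_task() is ordinary C, the assembly stubs in entry.asm can simply "call get_current_task" and take the result from eax instead of loading a fixed [current_task] address, which would no longer be correct once each core owns its own slot.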