- prepare MetalSVM for multicore environments
- create the variable current_task on each core

git-svn-id: http://svn.lfbs.rwth-aachen.de/svn/scc/trunk/MetalSVM@159 315a16e6-25f9-4109-90ae-ca3045a26c18

parent c4ed163f9d
commit b0e600da99

9 changed files with 61 additions and 39 deletions
@@ -472,7 +472,7 @@ global irq14
 global irq15

 extern irq_handler
-extern current_task
+extern get_current_task
 extern scheduler

 global reschedule

@@ -482,10 +482,11 @@ reschedule:
 ; => we have not to save the original eax value
 push ebx

-push DWORD [current_task]
+call get_current_task
+push eax
 call scheduler
+call get_current_task
 pop ebx
-mov eax, DWORD [current_task]
 cmp eax, ebx
 je no_task_switch1

@@ -515,11 +516,11 @@ irq0:
 call irq_handler
 add esp, 4

-mov eax, DWORD [current_task]
+call get_current_task
 push eax
 call scheduler
+call get_current_task
 pop ebx
-mov eax, DWORD [current_task]
 cmp eax, ebx
 je no_task_switch2
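
Both stubs follow the same pattern: since there is no longer a single global current_task to read, the low-level code fetches the per-core task pointer through get_current_task() before and after calling scheduler(), and only switches context when the two pointers differ. A minimal C sketch of that control flow (the task_t stub and context_switch() are hypothetical stand-ins, not MetalSVM functions):

typedef struct task { int id; } task_t;                 /* stand-in for MetalSVM's task_t */

extern task_t* get_current_task(void);                  /* provided by this commit */
extern void scheduler(void);                            /* picks the next task for this core */
extern void context_switch(task_t* prev, task_t* next); /* hypothetical switch helper */

void reschedule_sketch(void)
{
	task_t* prev = get_current_task(); /* task running before scheduling */
	scheduler();                       /* may select another task */
	task_t* next = get_current_task(); /* task selected by the scheduler */
	if (prev != next)                  /* nothing to do if the task is unchanged */
		context_switch(prev, next);
}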
@@ -71,9 +71,9 @@ next_try:
 		s->value--;
 		spinlock_unlock(&s->lock);
 	} else {
-		s->queue[s->pos] = current_task->id;
+		s->queue[s->pos] = per_core(current_task)->id;
 		s->pos = (s->pos + 1) % MAX_TASKS;
-		block_task(current_task->id);
+		block_task(per_core(current_task)->id);
 		spinlock_unlock(&s->lock);
 		reschedule();
 		goto next_try;
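
The wait path keeps its shape; only the accesses to the current task now go through per_core(). Roughly, as a sketch with minimal stand-in types (not the MetalSVM headers):

#define MAX_TASKS 16

typedef struct { int locked; } spinlock_t;
typedef struct { int id; } task_t;

typedef struct {
	int value;            /* available resources */
	int pos;              /* next free slot in the wait queue */
	int queue[MAX_TASKS]; /* ids of blocked tasks */
	spinlock_t lock;
} sem_sketch_t;

extern task_t* current_task; /* per_core(current_task) after this commit */
extern void spinlock_lock(spinlock_t* s);
extern void spinlock_unlock(spinlock_t* s);
extern void block_task(int id);
extern void reschedule(void);

void sem_wait_sketch(sem_sketch_t* s)
{
next_try:
	spinlock_lock(&s->lock);
	if (s->value > 0) {
		s->value--;                          /* resource available: take it */
		spinlock_unlock(&s->lock);
	} else {
		s->queue[s->pos] = current_task->id; /* enqueue this task's id */
		s->pos = (s->pos + 1) % MAX_TASKS;
		block_task(current_task->id);        /* mark the task blocked */
		spinlock_unlock(&s->lock);
		reschedule();                        /* let another task run */
		goto next_try;                       /* retry after wakeup */
	}
}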
@@ -61,7 +61,7 @@ inline static int spinlock_lock(spinlock_t* s) {
 	ticket = atomic_int32_inc(&s->queue);
 	while(atomic_int32_read(&s->dequeue) != ticket)
 		reschedule();
-	s->owner = current_task->id;
+	s->owner = per_core(current_task)->id;

 	return 0;
 }

@@ -97,7 +97,7 @@ inline static int spinlock_unlock_irqsave(spinlock_t* s) {
 }

 inline static int spinlock_has_lock(spinlock_t* s) {
-	return (s->owner == current_task->id);
+	return (s->owner == per_core(current_task)->id);
 }

 #ifdef __cplusplus
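
spinlock_lock() is a ticket lock: atomic_int32_inc(&s->queue) hands each contender a unique ticket, and the loop spins (rescheduling in between) until dequeue reaches that number, so the lock is granted in FIFO order. A self-contained C11 analogue of the idea, using <stdatomic.h> rather than MetalSVM's atomic_int32 API:

#include <stdatomic.h>

typedef struct {
	atomic_int next_ticket; /* corresponds to s->queue above */
	atomic_int now_serving; /* corresponds to s->dequeue above */
} ticket_lock_t;

static void ticket_lock(ticket_lock_t* l)
{
	int ticket = atomic_fetch_add(&l->next_ticket, 1); /* draw a ticket */
	while (atomic_load(&l->now_serving) != ticket)
		; /* spin; MetalSVM calls reschedule() here instead */
}

static void ticket_unlock(ticket_lock_t* l)
{
	atomic_fetch_add(&l->now_serving, 1); /* call up the next ticket */
}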
@@ -29,6 +29,19 @@ extern "C" {

 #define NULL ((void*) 0)

+#if MAX_CORES == 1
+#define per_core(name) name
+#define DECLARE_PER_CORE(type, name) extern type name;
+#define DEFINE_PER_CORE(type, name, def_value) type name = def_value;
+#else
+#define per_core(name) name[0].var
+#define DECLARE_PER_CORE(type, name) \
+	typedef struct { type var __attribute__ ((aligned (CACHE_LINE))); } aligned_##name;\
+	extern aligned_##name name[MAX_CORES];
+#define DEFINE_PER_CORE(type, name, def_value) \
+	aligned_##name name[MAX_CORES] = {[0 ... MAX_CORES-1] = {def_value}};
+#endif
+
 #ifdef __cplusplus
 }
 #endif
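
These macros are the core of the commit. With MAX_CORES == 1 they compile away; otherwise every per-core variable becomes an array with one cache-line-aligned slot per core, so cores do not false-share a line. Note that per_core() still expands to slot 0 here, so indexing by the running core's id is evidently left for a later commit. A small usage sketch (the CACHE_LINE value and the variable name `ticks` are made up for illustration; `[0 ... MAX_CORES-1]` is the GCC range-designator extension already used above):

#define CACHE_LINE 64 /* assumed cache-line size; the real value comes from MetalSVM's config */
#define MAX_CORES 4

#define per_core(name) name[0].var
#define DECLARE_PER_CORE(type, name) \
	typedef struct { type var __attribute__ ((aligned (CACHE_LINE))); } aligned_##name;\
	extern aligned_##name name[MAX_CORES];
#define DEFINE_PER_CORE(type, name, def_value) \
	aligned_##name name[MAX_CORES] = {[0 ... MAX_CORES-1] = {def_value}};

DECLARE_PER_CORE(int, ticks);   /* typedefs aligned_ticks and declares ticks[MAX_CORES] */
DEFINE_PER_CORE(int, ticks, 0); /* defines the array, zero-initializing every slot */

int read_ticks(void)
{
	return per_core(ticks);     /* expands to ticks[0].var */
}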
@@ -29,8 +29,8 @@
 extern "C" {
 #endif

-/* task, which is currently running */
-extern task_t* current_task;
+/* tasks, which are currently running */
+DECLARE_PER_CORE(task_t*, current_task);

 int multitasking_init(void);
@@ -171,7 +171,7 @@ int main(void)
 	create_kernel_task(&pc_id, scc_pc_task, NULL);
 #endif

-	current_task->status = TASK_IDLE;
+	per_core(current_task)->status = TASK_IDLE;
 	reschedule();

 	while(1) {
@@ -64,7 +64,7 @@ int syscall_handler(uint32_t sys_nr, ...)
 		ret = 0;
 		break;
 	case __NR_getpid:
-		ret = current_task->id;
+		ret = per_core(current_task)->id;
 		break;
 	case __NR_fstat:
 	default:
@@ -28,10 +28,18 @@
 #include <metalsvm/mailbox.h>
 #include <metalsvm/syscall.h>

-task_t* current_task = NULL;
+DEFINE_PER_CORE(task_t*, current_task, NULL);
 static task_t task_table[MAX_TASKS] = {[0 ... MAX_TASKS-1] = {0, 0, NULL, 0, ATOMIC_INIT(0)}};
 static spinlock_t table_lock = SPINLOCK_INIT;

+/*
+ * helper function for the assembly code to determine the current task
+ */
+task_t* get_current_task(void)
+{
+	return per_core(current_task);
+}
+
 int multitasking_init(void) {
 	unsigned int i;

@@ -40,8 +48,8 @@ int multitasking_init(void) {
 			task_table[i].id = i;
 			task_table[i].status = TASK_RUNNING;
 			memset(task_table[i].mbox, 0x00, sizeof(mailbox_int32_t*)*MAX_TASKS);
-			current_task = task_table+i;
-			register_task(current_task);
+			per_core(current_task) = task_table+i;
+			register_task(per_core(current_task));
 			return 0;
 		}
 	}
@@ -57,9 +65,9 @@ static void wakeup_blocked_tasks(int result)

 	/* wake up blocked tasks */
 	for(i=0; i<MAX_TASKS; i++) {
-		if (current_task->mbox[i]) {
-			mailbox_int32_post(current_task->mbox[i], result);
-			current_task->mbox[i] = NULL;
+		if (per_core(current_task)->mbox[i]) {
+			mailbox_int32_post(per_core(current_task)->mbox[i], result);
+			per_core(current_task)->mbox[i] = NULL;
 		}
 	}

@@ -67,14 +75,14 @@ static void wakeup_blocked_tasks(int result)
 }

 static void NORETURN do_exit(int arg) {
-	kprintf("Terminate task: %u, return value %d\n", current_task->id, arg);
+	kprintf("Terminate task: %u, return value %d\n", per_core(current_task)->id, arg);

 	wakeup_blocked_tasks(arg);
-	if (current_task->ustack)
-		kfree(current_task->ustack, current_task->stack_size);
-	if (atomic_int32_read(&current_task->mem_usage))
-		kprintf("Memory leak! Task %d did not release %d bytes\n", current_task->id, atomic_int32_read(&current_task->mem_usage));
-	current_task->status = TASK_FINISHED;
+	if (per_core(current_task)->ustack)
+		kfree(per_core(current_task)->ustack, per_core(current_task)->stack_size);
+	if (atomic_int32_read(&per_core(current_task)->mem_usage))
+		kprintf("Memory leak! Task %d did not release %d bytes\n", per_core(current_task)->id, atomic_int32_read(&per_core(current_task)->mem_usage));
+	per_core(current_task)->status = TASK_FINISHED;
 	reschedule();

 	kputs("Kernel panic: scheduler found no valid task\n");
@@ -96,7 +104,7 @@ void NORETURN leave_user_task(void) {
 	result = get_return_value();
 	SYSCALL1(__NR_exit, result);

-	kprintf("Kernel panic! Task %d comes back from syscall \"exit\"\n", current_task->id);
+	kprintf("Kernel panic! Task %d comes back from syscall \"exit\"\n", per_core(current_task)->id);

 	while(1) {
 		NOP8;
@@ -179,11 +187,11 @@ int join_task(tid_t id, int* result)
 	 * idle tasks are not allowed to wait for another task
 	 * they should always run...
 	 */
-	if (BUILTIN_EXPECT(current_task->status == TASK_IDLE, 0))
+	if (BUILTIN_EXPECT(per_core(current_task)->status == TASK_IDLE, 0))
 		goto join_out;

 	/* a task is not able to wait for itself */
-	if (BUILTIN_EXPECT(current_task->id == id, 0))
+	if (BUILTIN_EXPECT(per_core(current_task)->id == id, 0))
 		goto join_out;

 	/* invalid id */
@@ -198,7 +206,7 @@ int join_task(tid_t id, int* result)
 	if (BUILTIN_EXPECT(task_table[id].status == TASK_FINISHED, 0))
 		goto join_out;

-	task_table[id].mbox[current_task->id] = &mbox;
+	task_table[id].mbox[per_core(current_task)->id] = &mbox;

 	spinlock_unlock_irqsave(&table_lock);

@@ -267,23 +275,23 @@ void scheduler(void)
 	spinlock_lock(&table_lock);

 	/* signalize that this task could be reuse */
-	if (current_task->status == TASK_FINISHED)
-		current_task->status = TASK_INVALID;
+	if (per_core(current_task)->status == TASK_FINISHED)
+		per_core(current_task)->status = TASK_INVALID;

 	for(i=1; i<MAX_TASKS; i++) {
-		new_id = (current_task->id + i) % MAX_TASKS;
+		new_id = (per_core(current_task)->id + i) % MAX_TASKS;

 		if (task_table[new_id].status == TASK_READY) {
-			if (current_task->status == TASK_RUNNING)
-				current_task->status = TASK_READY;
+			if (per_core(current_task)->status == TASK_RUNNING)
+				per_core(current_task)->status = TASK_READY;
 			task_table[new_id].status = TASK_RUNNING;

-			current_task = task_table+new_id;
+			per_core(current_task) = task_table+new_id;
 			goto get_task_out;
 		}
 	}

-	if ((current_task->status == TASK_RUNNING) || (current_task->status == TASK_IDLE))
+	if ((per_core(current_task)->status == TASK_RUNNING) || (per_core(current_task)->status == TASK_IDLE))
 		goto get_task_out;

 	/*
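
The selection loop is a round-robin scan: starting one slot after the current task's id, it walks task_table modulo MAX_TASKS and takes the first TASK_READY entry, so every ready task gets a turn before the current one is picked again. A standalone illustration of the index walk (the concrete values are made up):

#include <stdio.h>

#define MAX_TASKS 8

int main(void)
{
	int current_id = 5; /* id of the task running now */
	for (int i = 1; i < MAX_TASKS; i++) {
		int new_id = (current_id + i) % MAX_TASKS; /* same formula as the commit */
		printf("%d ", new_id);                     /* visits 6 7 0 1 2 3 4 */
	}
	printf("\n");
	return 0;
}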
@@ -292,7 +300,7 @@ void scheduler(void)
 	 */
 	for(i=0; i<MAX_TASKS; i++) {
 		if (task_table[i].status == TASK_IDLE) {
-			current_task = task_table+i;
+			per_core(current_task) = task_table+i;
 			goto get_task_out;
 		}
 	}
@@ -222,7 +222,7 @@ oom:

 void* kmalloc(size_t sz)
 {
-	return task_malloc(current_task, sz);
+	return task_malloc(per_core(current_task), sz);
 }

 void* create_stack(task_t* task, size_t sz)
@@ -260,5 +260,5 @@ static void task_free(task_t* task, void* addr, size_t sz)

 void kfree(void* addr, size_t sz)
 {
-	task_free(current_task, addr, sz);
+	task_free(per_core(current_task), addr, sz);
 }