From 93257508ee71ee59e42c1e5f6ac0fe9388bf1dc4 Mon Sep 17 00:00:00 2001
From: Stefan Lankes
Date: Wed, 17 Aug 2011 15:09:59 +0200
Subject: [PATCH] wake up a blocked task on the core that it used during its
 last time slice

---
 include/metalsvm/tasks_types.h |  2 ++
 kernel/tasks.c                 | 17 +++++++----------
 2 files changed, 9 insertions(+), 10 deletions(-)

diff --git a/include/metalsvm/tasks_types.h b/include/metalsvm/tasks_types.h
index 784167a7..3981f15c 100644
--- a/include/metalsvm/tasks_types.h
+++ b/include/metalsvm/tasks_types.h
@@ -78,6 +78,8 @@ typedef struct task {
 	struct task* next;
 	/// previous task in the queue
 	struct task* prev;
+	/// last core id on which the task was running
+	uint32_t last_core;
 	/// Usage in number of pages
 	atomic_int32_t user_usage;
 	/// Avoids concurrent access to the page directory
diff --git a/kernel/tasks.c b/kernel/tasks.c
index 115f5be9..c6874153 100644
--- a/kernel/tasks.c
+++ b/kernel/tasks.c
@@ -47,8 +47,8 @@
  * A task's id will be its position in this array.
  */
 static task_t task_table[MAX_TASKS] = { \
-	[0] = {0, TASK_IDLE, 0, 0, 0, NULL, NULL, ATOMIC_INIT(0), SPINLOCK_INIT, NULL, SPINLOCK_INIT, NULL, 0, 0, 0, 0}, \
-	[1 ... MAX_TASKS-1] = {0, TASK_INVALID, 0, 0, 0, NULL, NULL, ATOMIC_INIT(0), SPINLOCK_INIT, NULL, SPINLOCK_INIT, NULL, 0, 0, 0, 0}};
+	[0] = {0, TASK_IDLE, 0, 0, 0, NULL, NULL, 0, ATOMIC_INIT(0), SPINLOCK_INIT, NULL, SPINLOCK_INIT, NULL, 0, 0, 0, 0}, \
+	[1 ... MAX_TASKS-1] = {0, TASK_INVALID, 0, 0, 0, NULL, NULL, 0, ATOMIC_INIT(0), SPINLOCK_INIT, NULL, SPINLOCK_INIT, NULL, 0, 0, 0, 0}};
 static spinlock_irqsave_t table_lock = SPINLOCK_IRQSAVE_INIT;
 static runqueue_t runqueues[MAX_CORES] = { \
 	[0] = {task_table+0, NULL, 0, {[0 ... MAX_PRIO-1] = {NULL, NULL}}, {NULL, NULL}, SPINLOCK_INIT}, \
@@ -107,6 +107,7 @@ size_t get_idle_task(uint32_t id)
 	task_table[id].status = TASK_IDLE;
 	task_table[id].prio = IDLE_PRIO;
 	task_table[id].flags = TASK_DEFAULT_FLAGS;
+	task_table[id].last_core = id;
 	atomic_int32_set(&task_table[id].user_usage, 0);
 	mailbox_wait_msg_init(&task_table[id].inbox);
 	memset(task_table[id].outbox, 0x00, sizeof(mailbox_wait_msg_t*)*MAX_TASKS);
@@ -263,6 +264,7 @@ static int create_task(tid_t* id, internal_entry_point_t ep, void* arg, uint8_t
 	task_table[i].status = TASK_READY;
 	task_table[i].flags = TASK_DEFAULT_FLAGS;
 	task_table[i].prio = prio;
+	task_table[i].last_core = 0;
 	spinlock_init(&task_table[i].vma_lock);
 	task_table[i].vma_list = NULL;
 	mailbox_wait_msg_init(&task_table[i].inbox);
@@ -282,21 +284,14 @@
 	// add task in the runqueue
 	spinlock_lock(&runqueues[core_id].lock);
 	runqueues[core_id].prio_bitmap |= (1 << prio);
-	kprintf("prio %d %p\n", prio, runqueues[core_id].queue[prio-1].first);
 	if (!runqueues[core_id].queue[prio-1].first) {
 		task_table[i].prev = NULL;
-		kputs("A");
 		runqueues[core_id].queue[prio-1].first = task_table+i;
-		kputs("B");
 		runqueues[core_id].queue[prio-1].last = task_table+i;
-		kputs("C");
 		task_table[i].next = NULL;
 	} else {
-		kputs("D");
 		task_table[i].prev = runqueues[core_id].queue[prio-1].last;
-		kputs("E");
 		runqueues[core_id].queue[prio-1].last->next = task_table+i;
-		kputs("F");
 		runqueues[core_id].queue[prio-1].last = task_table+i;
 		task_table[i].next = NULL;
 	}
@@ -367,6 +362,7 @@ int sys_fork(void)
 	task_table[i].end_heap = 0;
 	task_table[i].lwip_err = 0;
 	task_table[i].prio = parent_task->prio;
+	task_table[i].last_core = parent_task->last_core;
 
 	// add task in the runqueue
 	spinlock_lock(&runqueues[core_id].lock);
@@ -858,9 +854,9 @@ int wakeup_task(tid_t id)
 
 	flags = irq_nested_disable();
 
-	core_id = CORE_ID;
 	task = task_table + id;
 	prio = task->prio;
+	core_id = task->last_core;
 
 	if (task_table[id].status == TASK_BLOCKED) {
 		task_table[id].status = TASK_READY;
@@ -1023,6 +1019,7 @@ void scheduler(void)
 	uint64_t current_tick;
 
 	orig_task = curr_task = per_core(current_task);
+	curr_task->last_core = core_id;
 
 	/* signalizes that this task could be reused */
 	if (curr_task->status == TASK_FINISHED)
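
Taken together, the scheduler() and wakeup_task() hunks make the scheduler record in task->last_core the core a task ran on during its last time slice, and make wakeup_task() enqueue a woken task on that core's runqueue instead of the runqueue of whichever core happens to call wakeup_task(). The sketch below illustrates that enqueue path using only the structures visible in the patch (task_table, runqueues, prio_bitmap, the per-priority queue); spinlock_unlock() and irq_nested_enable() are assumed counterparts to the lock/disable calls shown above, and the body is simplified, so this is an illustration of the idea rather than the actual MetalSVM function.

/* Sketch: wake a blocked task on the core stored in task->last_core.
 * Simplified from the patch context; spinlock_unlock()/irq_nested_enable()
 * are assumed counterparts to the calls visible in the diff. */
int wakeup_task(tid_t id)
{
	uint32_t flags = irq_nested_disable();
	task_t* task = task_table + id;
	uint32_t prio = task->prio;
	uint32_t core_id = task->last_core;	/* previously: core_id = CORE_ID; */
	int ret = -1;

	if (task->status == TASK_BLOCKED) {
		task->status = TASK_READY;

		spinlock_lock(&runqueues[core_id].lock);
		runqueues[core_id].prio_bitmap |= (1 << prio);

		/* append the task to the per-core ready queue of its priority */
		task->next = NULL;
		task->prev = runqueues[core_id].queue[prio-1].last;
		if (task->prev)
			task->prev->next = task;
		else
			runqueues[core_id].queue[prio-1].first = task;
		runqueues[core_id].queue[prio-1].last = task;
		spinlock_unlock(&runqueues[core_id].lock);

		ret = 0;
	}

	irq_nested_enable(flags);
	return ret;
}

Keeping a woken task on its previous core preserves whatever cache state it built up during its last time slice and avoids an implicit migration on every wakeup; the trade-off is that the wakeup no longer targets the (possibly less loaded) core that executes wakeup_task().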