From 22dab5969937a8512eb73e883cec5366e01df428 Mon Sep 17 00:00:00 2001
From: Stefan Lankes
Date: Wed, 3 Aug 2011 15:10:58 +0200
Subject: [PATCH] minor changes in the scheduler routine to realize a fair
 round-robin scheduling

---
 kernel/tasks.c | 17 ++++++++++++++---
 1 file changed, 14 insertions(+), 3 deletions(-)

diff --git a/kernel/tasks.c b/kernel/tasks.c
index 6800e662..00e23098 100644
--- a/kernel/tasks.c
+++ b/kernel/tasks.c
@@ -789,6 +789,15 @@ int block_task(tid_t id)
 	return ret;
 }
 
+/*
+ * we use this struct to guarantee that the id
+ * has its own cache line
+ */
+typedef struct {
+	uint32_t id __attribute__ ((aligned (CACHE_LINE)));
+	uint8_t gap[CACHE_LINE-sizeof(uint32_t)];
+} last_id_t;
+
 /** @brief _The_ scheduler procedure
  *
  * Manages scheduling - right now this is just a round robin scheduler.
@@ -797,8 +806,9 @@ void scheduler(void)
 {
 	task_t* orig_task;
 	task_t* curr_task;
-	unsigned int i;
-	unsigned int new_id;
+	uint32_t i;
+	uint32_t new_id;
+	static last_id_t last_id = { 0 };
 
 #if MAX_CORES > 1
 	spinlock_irqsave_lock(&table_lock);
@@ -816,7 +826,7 @@ void scheduler(void)
 		curr_task->flags &= ~TASK_FPU_USED;
 	}
 
-	for(i=1, new_id=(curr_task->id + 1) % MAX_TASKS;
+	for(i=1, new_id=(last_id.id + 1) % MAX_TASKS;
 		i<MAX_TASKS; i++, new_id=(new_id+1) % MAX_TASKS)
 	{
 		if (task_table[new_id].status == TASK_READY) {
@@ -824,6 +834,7 @@ void scheduler(void)
 			curr_task->status = TASK_READY;
 			task_table[new_id].status = TASK_RUNNING;
 			curr_task = per_core(current_task) = task_table+new_id;
+			last_id.id = new_id;
 
 			goto get_task_out;
 		}