From fbaf77c8fd8c85300fab0b75e41da21972226e09 Mon Sep 17 00:00:00 2001
From: Stefan Lankes
Date: Thu, 22 Sep 2016 18:18:27 +0200
Subject: [PATCH] add counter to the task control block to determine the
 computation time

- the counter represents the time at which the task is scheduled on the cpu
- if two or more tasks are running on the same cpu, we use this value to
  determine an expired time slice => task switching for round-robin scheduling
---
 hermit/include/hermit/tasks_types.h |  2 ++
 hermit/kernel/tasks.c               | 24 +++++++++++++++++++++---
 2 files changed, 23 insertions(+), 3 deletions(-)

diff --git a/hermit/include/hermit/tasks_types.h b/hermit/include/hermit/tasks_types.h
index 73ec059d1..e6bb209ff 100644
--- a/hermit/include/hermit/tasks_types.h
+++ b/hermit/include/hermit/tasks_types.h
@@ -91,6 +91,8 @@ typedef struct task {
 	uint64_t timeout;
 	/// starting time/tick of the task
 	uint64_t start_tick;
+	/// last TSC, when the task got the CPU
+	uint64_t last_tsc;
 	/// the userspace heap
 	vma_t* heap;
 	/// parent thread
diff --git a/hermit/kernel/tasks.c b/hermit/kernel/tasks.c
index 66b103de1..7906f3e06 100644
--- a/hermit/kernel/tasks.c
+++ b/hermit/kernel/tasks.c
@@ -54,8 +54,8 @@ extern atomic_int32_t cpu_online;
  * A task's id will be its position in this array.
  */
 static task_t task_table[MAX_TASKS] = { \
-	[0]                 = {0, TASK_IDLE, 0, NULL, NULL, NULL, TASK_DEFAULT_FLAGS, 0, 0, 0, NULL, 0, NULL, NULL, 0, 0, 0, NULL, FPU_STATE_INIT}, \
-	[1 ... MAX_TASKS-1] = {0, TASK_INVALID, 0, NULL, NULL, NULL, TASK_DEFAULT_FLAGS, 0, 0, 0, NULL, 0, NULL, NULL, 0, 0, 0, NULL, FPU_STATE_INIT}};
+	[0]                 = {0, TASK_IDLE, 0, NULL, NULL, NULL, TASK_DEFAULT_FLAGS, 0, 0, 0, 0, NULL, 0, NULL, NULL, 0, 0, 0, NULL, FPU_STATE_INIT}, \
+	[1 ... MAX_TASKS-1] = {0, TASK_INVALID, 0, NULL, NULL, NULL, TASK_DEFAULT_FLAGS, 0, 0, 0, 0, NULL, 0, NULL, NULL, 0, 0, 0, NULL, FPU_STATE_INIT}};
 
 static spinlock_irqsave_t table_lock = SPINLOCK_IRQSAVE_INIT;
 
@@ -200,8 +200,23 @@ void check_scheduling(void)
 	if (!is_irq_enabled())
 		return;
 
-	if (get_highest_priority() > per_core(current_task)->prio)
+	uint32_t prio = get_highest_priority();
+	task_t* curr_task = per_core(current_task);
+
+	if (prio > curr_task->prio) {
 		reschedule();
+	} else if (prio == curr_task->prio) {
+		// if a task is ready, check if the current task runs already one tick (one time slice)
+		// => reschedule to realize round robin
+
+		const uint64_t diff_cycles = get_rdtsc() - curr_task->last_tsc;
+		const uint64_t cpu_freq_hz = 1000000ULL * (uint64_t) get_cpu_frequency();
+
+		if (((diff_cycles * (uint64_t) TIMER_FREQ) / cpu_freq_hz) > 0) {
+			//kprintf("Time slice expired for task %d on core %d. New task has priority %u.\n", curr_task->id, CORE_ID, prio);
+			reschedule();
+		}
+	}
 }
 
@@ -495,6 +510,7 @@ int clone_task(tid_t* id, entry_point_t ep, void* arg, uint8_t prio)
 	task_table[i].prio = prio;
 	task_table[i].heap = curr_task->heap;
 	task_table[i].start_tick = get_clock_tick();
+	task_table[i].last_tsc = 0;
 	task_table[i].parent = curr_task->id;
 	task_table[i].tls_addr = curr_task->tls_addr;
 	task_table[i].tls_size = curr_task->tls_size;
@@ -597,6 +613,7 @@ int create_task(tid_t* id, entry_point_t ep, void* arg, uint8_t prio, uint32_t c
 	task_table[i].prio = prio;
 	task_table[i].heap = NULL;
 	task_table[i].start_tick = get_clock_tick();
+	task_table[i].last_tsc = 0;
 	task_table[i].parent = 0;
 	task_table[i].ist_addr = ist;
 	task_table[i].tls_addr = 0;
@@ -862,6 +879,7 @@ size_t** scheduler(void)
 
 		// finally make it the new current task
 		curr_task->status = TASK_RUNNING;
+		curr_task->last_tsc = get_rdtsc();
 		set_per_core(current_task, curr_task);
 	}
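
For reference, the expiry test added to check_scheduling() converts elapsed TSC cycles into timer ticks: get_cpu_frequency() reports the clock in MHz, so cpu_freq_hz = 1000000 * get_cpu_frequency(), and (diff_cycles * TIMER_FREQ) / cpu_freq_hz is the number of whole timer ticks the task has held the CPU since scheduler() stored last_tsc; any result greater than zero means at least one time slice has elapsed. The sketch below replays that arithmetic outside the kernel; the 100 Hz timer rate and 2 GHz clock are assumed stand-in values for illustration, not constants taken from the patch.

#include <stdint.h>
#include <stdio.h>

/* Stand-in values for illustration only; HermitCore reads the real CPU
 * frequency at boot and defines TIMER_FREQ in its own headers. */
#define TIMER_FREQ    100ULL    /* assumed timer interrupt rate in Hz */
#define CPU_FREQ_MHZ  2000ULL   /* assumed CPU clock in MHz */

/* Mirror of the expiry test in check_scheduling(): returns 1 if the
 * elapsed TSC cycles cover at least one full timer tick. */
static int time_slice_expired(uint64_t last_tsc, uint64_t now_tsc)
{
	const uint64_t diff_cycles = now_tsc - last_tsc;
	const uint64_t cpu_freq_hz = 1000000ULL * CPU_FREQ_MHZ;

	return ((diff_cycles * TIMER_FREQ) / cpu_freq_hz) > 0;
}

int main(void)
{
	/* One tick at 100 Hz on a 2 GHz clock is 20,000,000 cycles. */
	printf("%d\n", time_slice_expired(0, 10000000ULL)); /* 0: half a tick    */
	printf("%d\n", time_slice_expired(0, 20000000ULL)); /* 1: one full tick  */
	return 0;
}

Because scheduler() refreshes last_tsc every time it dispatches a task, and clone_task()/create_task() initialize it to 0, the comparison always measures the current dispatch interval rather than the task's total runtime.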