From 22441375bb2253b0f1f82f6ef5e4d6fb4cade0dd Mon Sep 17 00:00:00 2001
From: Stefan Lankes
Date: Wed, 3 Aug 2011 21:41:06 +0200
Subject: [PATCH] add blocking timers

only the idle tasks use the polling mode
---
 arch/x86/kernel/timer.c | 56 ++++++++++++++++++++++++++++++++++-------
 1 file changed, 47 insertions(+), 9 deletions(-)

diff --git a/arch/x86/kernel/timer.c b/arch/x86/kernel/timer.c
index 251f71f8..66c4ac82 100644
--- a/arch/x86/kernel/timer.c
+++ b/arch/x86/kernel/timer.c
@@ -23,17 +23,26 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
 #include
 #include
 
+typedef struct {
+	uint8_t active;
+	uint64_t timeout;
+} timer_t;
+
+static timer_t timers[MAX_TASKS] = {[0 ... MAX_TASKS-1] = {0, 0}};
+static spinlock_irqsave_t timers_lock = SPINLOCK_IRQSAVE_INIT;
+
 /*
  * This will keep track of how many ticks the system
  * has been running for
  */
-static volatile uint64_t timer_ticks __attribute__ ((aligned (CACHE_LINE))) = 0;
+static volatile uint64_t timer_ticks = 0;
 
 uint64_t get_clock_tick(void)
 {
@@ -60,13 +69,24 @@ int sys_times(struct tms* buffer, clock_t* clock)
  */
 static void timer_handler(struct state *s)
 {
+	uint32_t i;
+
 	/* Increment our 'tick counter' */
 #if MAX_CORES > 1
 	if (smp_id() == 0)
-		timer_ticks++;
-#else
-	timer_ticks++;
 #endif
+	{
+		timer_ticks++;
+
+		spinlock_irqsave_lock(&timers_lock);
+		for(i=1; i<MAX_TASKS; i++) {
+			if (timers[i].active && (timer_ticks >= timers[i].timeout)) {
+				timers[i].active = 0;
+				wakeup_task(i);
+			}
+		}
+		spinlock_irqsave_unlock(&timers_lock);
+	}
 
 	/*
 	 * Every TIMER_FREQ clocks (approximately 1 second), we will
@@ -84,15 +104,33 @@ static void timer_handler(struct state *s)
 void timer_wait(unsigned int ticks)
 {
 	uint64_t eticks = timer_ticks + ticks;
+	task_t* curr_task = per_core(current_task);
 
-	while (timer_ticks < eticks) {
+	// Task 0 is always an idle task
+	// Perhaps, the status is not set correctly...
+	if ((curr_task->status == TASK_IDLE) || (curr_task->id == 0))
+	{
+		while (timer_ticks < eticks) {
+			check_workqueues();
+
+			// recheck break condition
+			if (timer_ticks >= eticks)
+				break;
+
+			HALT;
+		}
+	} else if (timer_ticks < eticks) {
 		check_workqueues();
 
-		// recheck break condition
-		if (timer_ticks >= eticks)
-			break;
+		spinlock_irqsave_lock(&timers_lock);
+		if (timer_ticks < eticks) {
+			timers[curr_task->id].active = 1;
+			timers[curr_task->id].timeout = eticks;
+			block_task(curr_task->id);
+			spinlock_irqsave_unlock(&timers_lock);
 
-			reschedule();
+			reschedule();
+		} else spinlock_irqsave_unlock(&timers_lock);
 	}
 }
 
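
Note (not part of the patch): the mechanism above gives every task one {active, timeout} slot, lets timer_handler() wake each task whose timeout has expired, and keeps polling with HALT only for the idle task. The following is a minimal, self-contained user-space sketch of that pattern in plain C; the names demo_timer_t, tick() and demo_wait() are invented for illustration and do not exist in the kernel tree this patch targets.

#include <stdint.h>
#include <stdio.h>

#define MAX_TASKS 4

typedef struct {
	uint8_t  active;   /* slot in use? */
	uint64_t timeout;  /* absolute tick at which the task should wake up */
} demo_timer_t;

static demo_timer_t timers[MAX_TASKS];
static uint64_t ticks;

/* Mirrors the loop added to timer_handler(): on every tick,
 * deactivate and "wake" all expired timers. */
static void tick(void)
{
	uint32_t i;

	ticks++;
	for (i = 1; i < MAX_TASKS; i++) {
		if (timers[i].active && (ticks >= timers[i].timeout)) {
			timers[i].active = 0;
			printf("tick %llu: wake task %u\n",
			       (unsigned long long)ticks, i);
		}
	}
}

/* Mirrors the non-idle branch of timer_wait(): instead of spinning,
 * register an absolute timeout and let the tick handler do the wakeup. */
static void demo_wait(uint32_t id, unsigned int wait_ticks)
{
	timers[id].active = 1;
	timers[id].timeout = ticks + wait_ticks;
}

int main(void)
{
	uint64_t t;

	demo_wait(1, 3);   /* "task" 1 sleeps for 3 ticks */
	demo_wait(2, 5);   /* "task" 2 sleeps for 5 ticks */

	for (t = 0; t < 6; t++)
		tick();

	return 0;
}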