diff --git a/hermit/Makefile b/hermit/Makefile index be77d5039..b53a9b606 100644 --- a/hermit/Makefile +++ b/hermit/Makefile @@ -2,7 +2,8 @@ TERM = xterm TOPDIR = $(shell pwd) ARCH = x86 NAME = hermit -KERNDIRS = kernel mm libkern arch/$(ARCH)/kernel arch/$(ARCH)/mm +LWIPDIRS = lwip/src/arch lwip/src/api lwip/src/core lwip/src/core/ipv4 lwip/src/netif +KERNDIRS = kernel mm libkern arch/$(ARCH)/kernel arch/$(ARCH)/mm $(LWIPDIRS) SUBDIRS = $(KERNDIRS) # Set your own cross compiler tool chain prefix here @@ -38,7 +39,7 @@ QEMUDEBUGFLAGS = -monitor none -daemonize \ QEMUSERIALFLAGS = -device pci-serial,chardev=tS0 \ -chardev socket,host=localhost,port=4555,server,id=tS0 -INCLUDE = -I$(TOPDIR)/include -I$(TOPDIR)/arch/$(ARCH)/include +INCLUDE = -I$(TOPDIR)/include -I$(TOPDIR)/arch/$(ARCH)/include -I$(TOPDIR)/lwip/src/include -I$(TOPDIR)/lwip/src/include/ipv4 # Compiler options for final code CFLAGS = -g -m64 -Wall -O2 -mno-red-zone -fstrength-reduce -fomit-frame-pointer -finline-functions -ffreestanding -nostdinc -fno-stack-protector $(INCLUDE) # Compiler options for debugging diff --git a/hermit/arch/x86/kernel/irq.c b/hermit/arch/x86/kernel/irq.c index cfb5807b7..b029488cc 100644 --- a/hermit/arch/x86/kernel/irq.c +++ b/hermit/arch/x86/kernel/irq.c @@ -241,6 +241,8 @@ size_t** irq_handler(struct state *s) /* This is a blank function pointer */ void (*handler) (struct state * s); + check_workqueues_in_irqhandler(s->int_no); + /* * Find out if we have a custom handler to run for this * IRQ and then finally, run it diff --git a/hermit/arch/x86/kernel/processor.c b/hermit/arch/x86/kernel/processor.c index 609195bf2..6d0bafb04 100644 --- a/hermit/arch/x86/kernel/processor.c +++ b/hermit/arch/x86/kernel/processor.c @@ -228,3 +228,16 @@ uint32_t get_cpu_frequency(void) return detect_cpu_frequency(); } +void udelay(uint32_t usecs) +{ + uint64_t diff, end, start = rdtsc(); + uint64_t deadline = get_cpu_frequency() * usecs; + + do { + mb(); + end = rdtsc(); + diff = end > start 
? end - start : start - end; + if ((diff < deadline) && (deadline - diff > 50000)) + check_workqueues(); + } while(diff < deadline); +} diff --git a/hermit/arch/x86/kernel/timer.c b/hermit/arch/x86/kernel/timer.c index 1d9467627..f2e6c95d3 100644 --- a/hermit/arch/x86/kernel/timer.c +++ b/hermit/arch/x86/kernel/timer.c @@ -29,6 +29,7 @@ #include #include #include +#include #include #include #include @@ -65,6 +66,39 @@ static void timer_handler(struct state *s) }*/ } +int timer_wait(unsigned int ticks) +{ + uint64_t eticks = timer_ticks + ticks; + + task_t* curr_task = per_core(current_task); + + if (curr_task->status == TASK_IDLE) + { + /* + * This will continuously loop until the given time has + * been reached + */ + while (timer_ticks < eticks) { + check_workqueues(); + + // recheck break condition + if (timer_ticks >= eticks) + break; + + HALT; + } + } else if (timer_ticks < eticks) { + check_workqueues(); + + if (timer_ticks < eticks) { + set_timer(eticks); + reschedule(); + } + } + + return 0; +} + #define LATCH(f) ((CLOCK_TICK_RATE + f/2) / f) #define WAIT_SOME_TIME() do { uint64_t start = rdtsc(); \ while(rdtsc() - start < 1000000) ; \ diff --git a/hermit/include/hermit/tasks.h b/hermit/include/hermit/tasks.h index 8e8a7bb83..be19f151e 100644 --- a/hermit/include/hermit/tasks.h +++ b/hermit/include/hermit/tasks.h @@ -176,12 +176,51 @@ int wakeup_task(tid_t); */ int block_current_task(void); +/** @brief Block current task until timer expires + * + * @param deadline Clock tick, when the timer expires + * @return + * - 0 on success + * - -EINVAL (-22) on failure + */ +int set_timer(uint64_t deadline); + +/** @brief check if a timer is expired + * + */ +void check_timers(void); + /** @brief Abort current task */ void NORETURN abort(void); /** @brief This function shall be called by leaving kernel-level tasks */ void NORETURN leave_kernel_task(void); +/** @brief if a task exists with higher priority, MetalSVM switches to it. 
+ * */ +void check_scheduling(void); + +#if 0 +/** @brief check, if the tick counter has to be updated + * */ +void check_ticks(void); +#endif + +static inline void check_workqueues_in_irqhandler(int irq) +{ + //check_ticks(); + check_timers(); + + if (irq < 0) + check_scheduling(); +} + +static inline void check_workqueues(void) +{ + // call with invalid interrupt number + check_workqueues_in_irqhandler(-1); +} + #ifdef __cplusplus } #endif diff --git a/hermit/include/hermit/tasks_types.h b/hermit/include/hermit/tasks_types.h index 9cedcce45..2d6a9288d 100644 --- a/hermit/include/hermit/tasks_types.h +++ b/hermit/include/hermit/tasks_types.h @@ -48,15 +48,16 @@ extern "C" { #endif #define TASK_INVALID 0 -#define TASK_READY 1 +#define TASK_READY 1 #define TASK_RUNNING 2 #define TASK_BLOCKED 3 #define TASK_FINISHED 4 -#define TASK_IDLE 5 +#define TASK_IDLE 5 #define TASK_DEFAULT_FLAGS 0 #define TASK_FPU_INIT (1 << 0) #define TASK_FPU_USED (1 << 1) +#define TASK_TIMER (1 << 2) #define MAX_PRIO 31 #define REALTIME_PRIO 31 @@ -83,6 +84,8 @@ typedef struct task { uint8_t flags; /// Task priority uint8_t prio; + /// timeout for a blocked task + uint64_t timeout; /// Physical address of root page table size_t page_map; /// Lock for page tables @@ -99,6 +102,8 @@ typedef struct task { struct task* next; /// previous task in the queue struct task* prev; + /// LwIP error code + int lwip_err; /// FPU state union fpu_state fpu; } task_t; @@ -120,6 +125,8 @@ typedef struct { uint32_t prio_bitmap; /// a queue for each priority task_list_t queue[MAX_PRIO]; + /// a queue for timers + task_list_t timers; /// lock for this runqueue spinlock_irqsave_t lock; } readyqueues_t; diff --git a/hermit/include/hermit/time.h b/hermit/include/hermit/time.h index 9768e0bcf..d84f889f3 100644 --- a/hermit/include/hermit/time.h +++ b/hermit/include/hermit/time.h @@ -44,11 +44,27 @@ extern "C" { */ int timer_init(void); +/** @brief Initializes a timer + * + * @param ticks Amount of ticks to wait + * 
@return + * - 0 on success + */ +int timer_wait(unsigned int ticks); + /** @brief Returns the current number of ticks. * @return Current number of ticks */ uint64_t get_clock_tick(void); +/** @brief sleep some seconds + * + * This function sleeps some seconds + * + * @param sec Amount of seconds to wait + */ +static inline void sleep(unsigned int sec) { timer_wait(sec*TIMER_FREQ); } + #ifdef __cplusplus } #endif diff --git a/hermit/kernel/main.c b/hermit/kernel/main.c index 61a1b9177..f881ac7b7 100644 --- a/hermit/kernel/main.c +++ b/hermit/kernel/main.c @@ -34,11 +34,21 @@ #include #include #include - #include #include #include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + /* * Note that linker symbols are not variables, they have no memory allocated for * maintaining a value, rather their address is their value. @@ -79,6 +89,30 @@ static int hermit_init(void) return 0; } +static void tcpip_init_done(void* arg) +{ + sys_sem_t* sem = (sys_sem_t*)arg; + + kprintf("LwIP's tcpip thread has task id %d\n", per_core(current_task)->id); + + sys_sem_signal(sem); +} + +static int init_netifs(void) +{ + sys_sem_t sem; + + if(sys_sem_new(&sem, 0) != ERR_OK) + LWIP_ASSERT("Failed to create semaphore", 0); + + tcpip_init(tcpip_init_done, &sem); + sys_sem_wait(&sem); + kprintf("TCP/IP initialized.\n"); + sys_sem_free(&sem); + + return 0; +} + int smp_main(void) { int32_t cpu = atomic_int32_inc(&cpu_online); @@ -87,8 +121,8 @@ int smp_main(void) create_kernel_task(NULL, foo, "foo2", NORMAL_PRIO); - flush_tlb(); while(1) { + check_workqueues(); HALT; } @@ -111,7 +145,10 @@ int main(void) create_kernel_task(NULL, foo, "foo1", NORMAL_PRIO); - while(1) { + init_netifs(); + + while(1) { + check_workqueues(); HALT; } diff --git a/hermit/kernel/tasks.c b/hermit/kernel/tasks.c index b37fad3fe..8593c4c9c 100644 --- a/hermit/kernel/tasks.c +++ b/hermit/kernel/tasks.c @@ -32,6 +32,7 @@ #include #include #include +#include #include 
#include #include @@ -41,16 +42,16 @@ * A task's id will be its position in this array. */ static task_t task_table[MAX_TASKS] = { \ - [0] = {0, TASK_IDLE, 0, NULL, NULL, TASK_DEFAULT_FLAGS, 0, 0, SPINLOCK_IRQSAVE_INIT, SPINLOCK_INIT, NULL, NULL, ATOMIC_INIT(0), NULL, NULL}, \ - [1 ... MAX_TASKS-1] = {0, TASK_INVALID, 0, NULL, NULL, TASK_DEFAULT_FLAGS, 0, 0, SPINLOCK_IRQSAVE_INIT, SPINLOCK_INIT, NULL, NULL,ATOMIC_INIT(0), NULL, NULL}}; + [0] = {0, TASK_IDLE, 0, NULL, NULL, TASK_DEFAULT_FLAGS, 0, 0, 0, SPINLOCK_IRQSAVE_INIT, SPINLOCK_INIT, NULL, NULL, ATOMIC_INIT(0), NULL, NULL, 0}, \ + [1 ... MAX_TASKS-1] = {0, TASK_INVALID, 0, NULL, NULL, TASK_DEFAULT_FLAGS, 0, 0, 0, SPINLOCK_IRQSAVE_INIT, SPINLOCK_INIT, NULL, NULL,ATOMIC_INIT(0), NULL, NULL, 0}}; static spinlock_irqsave_t table_lock = SPINLOCK_IRQSAVE_INIT; #if MAX_CORES > 1 static readyqueues_t readyqueues[MAX_CORES] = { \ - [0 ... MAX_CORES-1] = {NULL, NULL, 0, 0, {[0 ... MAX_PRIO-2] = {NULL, NULL}}, SPINLOCK_IRQSAVE_INIT}}; + [0 ... MAX_CORES-1] = {NULL, NULL, 0, 0, {[0 ... MAX_PRIO-2] = {NULL, NULL}}, {NULL, NULL}, SPINLOCK_IRQSAVE_INIT}}; #else -static readyqueues_t readyqueues[1] = {[0] = {task_table+0, NULL, 0, 0, {[0 ... MAX_PRIO-2] = {NULL, NULL}}, SPINLOCK_IRQSAVE_INIT}}; +static readyqueues_t readyqueues[1] = {[0] = {task_table+0, NULL, 0, 0, {[0 ... 
MAX_PRIO-2] = {NULL, NULL}}, {NULL, NULL}, SPINLOCK_IRQSAVE_INIT}}; #endif DEFINE_PER_CORE(task_t*, current_task, task_table+0); @@ -64,6 +65,14 @@ task_t* get_current_task(void) return per_core(current_task); } +void check_scheduling(void) +{ + if (!is_irq_enabled()) + return; + if (msb(readyqueues[CORE_ID].prio_bitmap) > per_core(current_task)->prio) + reschedule(); +} + uint32_t get_highest_priority(void) { return msb(readyqueues[CORE_ID].prio_bitmap); @@ -229,6 +238,7 @@ int create_task(tid_t* id, entry_point_t ep, void* arg, uint8_t prio, uint32_t c spinlock_init(&task_table[i].vma_lock); task_table[i].vma_list = NULL; task_table[i].heap = NULL; + task_table[i].lwip_err = 0; spinlock_irqsave_init(&task_table[i].page_lock); atomic_int32_set(&task_table[i].user_usage, 0); @@ -314,6 +324,19 @@ int wakeup_task(tid_t id) // increase the number of ready tasks readyqueues[core_id].nr_tasks++; + // do we need to remove from timer queue? + if (task->flags & TASK_TIMER) { + task->flags &= ~TASK_TIMER; + if (task->prev) + task->prev->next = task->next; + if (task->next) + task->next->prev = task->prev; + if (readyqueues[core_id].timers.first == task) + readyqueues[core_id].timers.first = task->next; + if (readyqueues[core_id].timers.last == task) + readyqueues[core_id].timers.last = task->prev; + } + // add task to the runqueue if (!readyqueues[core_id].queue[prio-1].last) { readyqueues[core_id].queue[prio-1].last = readyqueues[core_id].queue[prio-1].first = task; @@ -388,6 +411,132 @@ int block_current_task(void) return ret; } +int set_timer(uint64_t deadline) +{ + task_t* curr_task; + task_t* tmp; + uint32_t core_id, prio; + uint32_t flags; + int ret = -EINVAL; + + flags = irq_nested_disable(); + + curr_task = per_core(current_task); + prio = curr_task->prio; + core_id = CORE_ID; + + if (curr_task->status == TASK_RUNNING) { + curr_task->status = TASK_BLOCKED; + curr_task->timeout = deadline; + curr_task->flags |= TASK_TIMER; + ret = 0; + + 
spinlock_irqsave_lock(&readyqueues[core_id].lock); + + // reduce the number of ready tasks + readyqueues[core_id].nr_tasks--; + + // remove task from queue + if (curr_task->prev) + curr_task->prev->next = curr_task->next; + if (curr_task->next) + curr_task->next->prev = curr_task->prev; + if (readyqueues[core_id].queue[prio-1].first == curr_task) + readyqueues[core_id].queue[prio-1].first = curr_task->next; + if (readyqueues[core_id].queue[prio-1].last == curr_task) { + readyqueues[core_id].queue[prio-1].last = curr_task->prev; + if (!readyqueues[core_id].queue[prio-1].last) + readyqueues[core_id].queue[prio-1].last = readyqueues[core_id].queue[prio-1].first; + } + + // No valid task in queue => update prio_bitmap + if (!readyqueues[core_id].queue[prio-1].first) + readyqueues[core_id].prio_bitmap &= ~(1 << prio); + + // add task to the timer queue + tmp = readyqueues[core_id].timers.first; + if (!tmp) { + readyqueues[core_id].timers.first = readyqueues[core_id].timers.last = curr_task; + curr_task->prev = curr_task->next = NULL; + } else { + while(tmp && (deadline >= tmp->timeout)) + tmp = tmp->next; + + if (!tmp) { + curr_task->next = NULL; + curr_task->prev = readyqueues[core_id].timers.last; + if (readyqueues[core_id].timers.last) + readyqueues[core_id].timers.last->next = curr_task; + readyqueues[core_id].timers.last = curr_task; + // obsolete lines... + //if (!readyqueues[core_id].timers.first) + // readyqueues[core_id].timers.first = curr_task; + } else { + curr_task->prev = tmp->prev; + curr_task->next = tmp; + tmp->prev = curr_task; + if (curr_task->prev) + curr_task->prev->next = curr_task; + if (readyqueues[core_id].timers.first == tmp) + readyqueues[core_id].timers.first = curr_task; + } + } + + spinlock_irqsave_unlock(&readyqueues[core_id].lock); + } else kprintf("Task is already blocked. 
No timer will be set!\n"); + + irq_nested_enable(flags); + + return ret; +} + +void check_timers(void) +{ + uint32_t core_id = CORE_ID; + uint32_t prio; + uint64_t current_tick; + + spinlock_irqsave_lock(&readyqueues[core_id].lock); + + // check timers + current_tick = get_clock_tick(); + while (readyqueues[core_id].timers.first && readyqueues[core_id].timers.first->timeout <= current_tick) + { + task_t* task = readyqueues[core_id].timers.first; + + // remove timer from queue + readyqueues[core_id].timers.first = readyqueues[core_id].timers.first->next; + if (readyqueues[core_id].timers.first) + readyqueues[core_id].timers.first->prev = NULL; + else + readyqueues[core_id].timers.last = NULL; + task->flags &= ~TASK_TIMER; + + // wakeup task + if (task->status == TASK_BLOCKED) { + task->status = TASK_READY; + prio = task->prio; + + // increase the number of ready tasks + readyqueues[core_id].nr_tasks++; + + // add task to the runqueue + if (!readyqueues[core_id].queue[prio-1].first) { + readyqueues[core_id].queue[prio-1].last = readyqueues[core_id].queue[prio-1].first = task; + task->next = task->prev = NULL; + readyqueues[core_id].prio_bitmap |= (1 << prio); + } else { + task->prev = readyqueues[core_id].queue[prio-1].last; + task->next = NULL; + readyqueues[core_id].queue[prio-1].last->next = task; + readyqueues[core_id].queue[prio-1].last = task; + } + } + } + + spinlock_irqsave_unlock(&readyqueues[core_id].lock); +} + size_t** scheduler(void) { task_t* orig_task;