Mirror of https://github.com/hermitcore/libhermit.git (synced 2025-03-09 00:00:03 +01:00)
Merge pull request #40 from daniel-k/pr/cleanup

Various cleanup and refactoring throughout the whole codebase

Commit b149bad0a9: 15 changed files with 456 additions and 379 deletions
@@ -204,6 +204,7 @@ int apic_is_enabled(void);
int apic_enable_timer(void);
int apic_disable_timer(void);
int apic_timer_deadline(uint32_t);
int apic_timer_is_running(void);
int apic_send_ipi(uint64_t dest, uint8_t irq);
int ioapic_inton(uint8_t irq, uint8_t apicid);
int ioapic_intoff(uint8_t irq, uint8_t apicid);
@@ -41,54 +41,12 @@
extern "C" {
#endif

/** @brief Disable IRQs
 *
 * This inline function just clears out the interrupt bit
 */
inline static void irq_disable(void) {
	asm volatile("cli" ::: "memory");
}

/** @brief Disable IRQs (nested)
 *
 * Disable IRQs when unsure if IRQs were enabled at all.\n
 * This function together with irq_nested_enable can be used
 * in situations when interrupts shouldn't be activated if they
 * were not activated before calling this function.
 *
 * @return The set of flags which have been set until now
 */
inline static uint8_t irq_nested_disable(void) {
	size_t flags;
	asm volatile("pushf; cli; pop %0": "=r"(flags) : : "memory");
	if (flags & (1 << 9))
		return 1;
	return 0;
}

/** @brief Enable IRQs */
inline static void irq_enable(void) {
	asm volatile("sti" ::: "memory");
}

/** @brief Enable IRQs (nested)
 *
 * If called after calling irq_nested_disable, this function will
 * not activate IRQs if they were not active before.
 *
 * @param flags Flags to set. Could be the old ones you got from irq_nested_disable.
 */
inline static void irq_nested_enable(uint8_t flags) {
	if (flags)
		irq_enable();
}

/** @brief Determines, if the interrupt flags (IF) is set
 *
 * @return
 * - 1 interrupt flag is set
 * - 0 interrupt flag is cleared
 */
inline static uint8_t is_irq_enabled(void)
{
	size_t flags;
@@ -98,6 +56,49 @@ inline static uint8_t is_irq_enabled(void)
	return 0;
}

/** @brief Disable IRQs
 *
 * This inline function just clears out the interrupt bit
 */
inline static void irq_disable(void) {
	asm volatile("cli" ::: "memory");
}

/** @brief Enable IRQs
 *
 * This inline function just sets the interrupt bit
 */
inline static void irq_enable(void) {
	asm volatile("sti" ::: "memory");
}

/** @brief Disable IRQs (nested)
 *
 * Disable IRQs when unsure if IRQs were enabled at all.
 * This function together with irq_nested_enable can be used
 * in situations when interrupts shouldn't be activated if they
 * were not activated before calling this function.
 *
 * @return Whether IRQs had been enabled or not before disabling
 */
inline static uint8_t irq_nested_disable(void) {
	uint8_t was_enabled = is_irq_enabled();
	irq_disable();
	return was_enabled;
}

/** @brief Enable IRQs (nested)
 *
 * Can be used in conjunction with irq_nested_disable() to only enable
 * interrupts again if they were enabled before.
 *
 * @param was_enabled Whether IRQs should be enabled or not
 */
inline static void irq_nested_enable(uint8_t was_enabled) {
	if (was_enabled)
		irq_enable();
}

#ifdef __cplusplus
}
#endif
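Note on the refactored helpers: irq_nested_disable() now returns whether interrupts were enabled rather than raw EFLAGS bits, and irq_nested_enable() restores exactly that state. A minimal usage sketch, not part of the diff; shared_counter and bump_counter are made up for illustration:

static volatile unsigned long shared_counter;

static void bump_counter(void)
{
	/* remember the old IF state, then cli */
	uint8_t was_enabled = irq_nested_disable();

	shared_counter++;	/* critical section, safe against local IRQs */

	/* sti only if IRQs were enabled before we entered */
	irq_nested_enable(was_enabled);
}

This is the same pattern the spinlock_irqsave_lock()/unlock() pair further below relies on.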
@@ -56,6 +56,8 @@ typedef struct {
	uint32_t status;
} i387_fsave_t;

#define FPU_STATE_INIT { {0, 0, 0, 0, 0, 0, 0, { [0 ... 19] = 0 }, 0} }

typedef struct {
	uint16_t cwd;
	uint16_t swd;
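FPU_STATE_INIT relies on GCC's designated range initializers ([0 ... 19] = 0), the same extension this codebase already uses for task_table and irq_routines. A tiny standalone sketch of the idiom, with made-up names:

#define DEMO_SLOTS 8

/* GNU range designators: every index in the range gets the same initializer */
static int demo_table[DEMO_SLOTS] = {
	[0] = 1,			/* first slot differs */
	[1 ... DEMO_SLOTS-1] = 0	/* remaining slots explicitly zeroed */
};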
@@ -303,6 +303,15 @@ static inline void set_ipi_dest(uint32_t cpu_id) {
	lapic_write(APIC_ICR2, tmp);
}

int apic_timer_is_running(void)
{
	if (BUILTIN_EXPECT(apic_is_enabled(), 1)) {
		return lapic_read(APIC_CCR) != 0;
	}

	return 0;
}

int apic_timer_deadline(uint32_t ticks)
{
	if (BUILTIN_EXPECT(apic_is_enabled() && icr, 1)) {
@@ -361,6 +361,20 @@ isrstub_pseudo_error 9
	%assign i i+1
%endrep

global wakeup
align 64
wakeup:
	push byte 0 ; pseudo error code
	push byte 121
	jmp common_stub

global mmnif_irq
align 64
mmnif_irq:
	push byte 0 ; pseudo error code
	push byte 122
	jmp common_stub

global apic_timer
align 64
apic_timer:
@@ -396,20 +410,6 @@ apic_svr:
	push byte 127
	jmp common_stub

global wakeup
align 64
wakeup:
	push byte 0 ; pseudo error code
	push byte 121
	jmp common_stub

global mmnif_irq
align 64
mmnif_irq:
	push byte 0 ; pseudo error code
	push byte 122
	jmp common_stub

extern irq_handler
extern get_current_stack
extern finish_task_switch
@@ -91,7 +91,7 @@ extern void mmnif_irq(void);
 * This array is actually an array of function pointers. We use
 * this to handle custom IRQ handlers for a given IRQ
 */
static void* irq_routines[MAX_HANDLERS] = {[0 ... MAX_HANDLERS-1] = NULL};
static irq_handler_t irq_routines[MAX_HANDLERS] = {[0 ... MAX_HANDLERS-1] = NULL};
static uint64_t irq_counter[MAX_CORES][MAX_HANDLERS] = {[0 ... MAX_CORES-1][0 ... MAX_HANDLERS-1] = 0};
#ifdef MEASURE_IRQ
static int go = 0;
@@ -281,40 +281,41 @@ int irq_init(void)
 */
size_t** irq_handler(struct state *s)
{
	size_t** ret = NULL;
#ifdef MEASURE_IRQ
	uint64_t diff = 0;
#endif

	/* This is a blank function pointer */
	void (*handler) (struct state * s);

#ifdef MEASURE_IRQ
	if (go)
		diff = rdtsc();
#endif

	size_t** ret = NULL;

	if(BUILTIN_EXPECT(s->int_no >= MAX_HANDLERS, 0)) {
		kprintf("[%d] Invalid IRQ number %d\n", CORE_ID, s->int_no);
		return NULL;
	}

	irq_counter[CORE_ID][s->int_no]++;

	check_workqueues_in_irqhandler(s->int_no);

	/*
	 * Find out if we have a custom handler to run for this
	 * IRQ and then finally, run it
	 */
	if (BUILTIN_EXPECT(s->int_no < MAX_HANDLERS, 1)) {
		handler = irq_routines[s->int_no];
		if (handler)
			handler(s);
		else
			kprintf("Unhandle IRQ %d\n", s->int_no);
	} else kprintf("Invalid interrupt number %d\n", s->int_no);
	// Find out if we have a custom handler to run for this IRQ and run it
	irq_handler_t handler = irq_routines[s->int_no];

	// timer interrupt?
	if ((s->int_no == 32) || (s->int_no == 123))
		ret = scheduler(); // switch to a new task
	else if ((s->int_no >= 32) && (get_highest_priority() > per_core(current_task)->prio))
	if (handler) {
		handler(s);
	} else {
		kprintf("[%d] Unhandled IRQ %d\n", CORE_ID, s->int_no);
	}

	// Check if timers have expired that would unblock tasks
	check_workqueues_in_irqhandler((int) s->int_no);

	if ((s->int_no == 32) || (s->int_no == 123)) {
		// a timer interrupt may have caused unblocking of tasks
		ret = scheduler();
	} else if ((s->int_no >= 32) && (get_highest_priority() > per_core(current_task)->prio)) {
		// there's a ready task with higher priority
		ret = scheduler();
	}

	apic_eoi(s->int_no);
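With irq_routines now typed as irq_handler_t, a driver hooks into the table through irq_install_handler(), just as timer_init() below does for vector 121. A rough sketch; the vector 42 and the handler body are invented, only the handler signature and irq_install_handler() come from this codebase:

static void demo_device_isr(struct state *s)
{
	(void) s;
	kprintf("demo device: interrupt serviced\n");
}

static void demo_device_init(void)
{
	irq_install_handler(42, demo_device_isr);
}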
@@ -54,13 +54,15 @@ void check_ticks(void)
	if (!cpu_freq)
		return;

	uint64_t curr_rdtsc = get_rdtsc();
	uint64_t diff;

	const uint64_t curr_rdtsc = has_rdtscp() ? rdtscp(NULL) : rdtsc();
	rmb();
	diff = ((curr_rdtsc - per_core(last_rdtsc)) * (uint64_t)TIMER_FREQ) / (1000000ULL*(uint64_t)get_cpu_frequency());
	if (diff > 0) {
		set_per_core(timer_ticks, per_core(timer_ticks) + diff);

	const uint64_t diff_cycles = curr_rdtsc - per_core(last_rdtsc);
	const uint64_t cpu_freq_hz = 1000000ULL * (uint64_t) get_cpu_frequency();
	const uint64_t diff_ticks = (diff_cycles * (uint64_t) TIMER_FREQ) / cpu_freq_hz;

	if (diff_ticks > 0) {
		set_per_core(timer_ticks, per_core(timer_ticks) + diff_ticks);
		set_per_core(last_rdtsc, curr_rdtsc);
		rmb();
	}
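To make the cycles-to-ticks conversion concrete (hypothetical numbers): get_cpu_frequency() is evidently reported in MHz, hence the 1000000ULL scaling. With TIMER_FREQ = 100 and a 2000 MHz core, 40,000,000 elapsed TSC cycles yield (40000000 * 100) / (1000000 * 2000) = 2 ticks; fewer than 20,000,000 cycles yield 0 ticks, in which case last_rdtsc is deliberately left untouched so the partial interval is not lost on the next call.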
@@ -187,7 +189,7 @@ int timer_init(void)
	irq_install_handler(121, wakeup_handler);

#ifdef DYNAMIC_TICKS
	boot_tsc = get_rdtsc();
	boot_tsc = has_rdtscp() ? rdtscp(NULL) : rdtsc();
	set_per_core(last_rdtsc, boot_tsc);
#endif
@@ -173,13 +173,13 @@ inline static int spinlock_irqsave_destroy(spinlock_irqsave_t* s) {
	return 0;
}

/** @brief Unlock an irqsave spinlock on exit of critical section
/** @brief Lock spinlock on entry of critical section and disable interrupts
 * @return
 * - 0 on success
 * - -EINVAL (-22) on failure
 */
inline static int spinlock_irqsave_lock(spinlock_irqsave_t* s) {
	uint32_t flags;
	uint8_t flags;
	int32_t ticket;

	if (BUILTIN_EXPECT(!s, 0))
@@ -191,14 +191,10 @@ inline static int spinlock_irqsave_lock(spinlock_irqsave_t* s) {
		return 0;
	}

#if 1
	ticket = atomic_int32_inc(&s->queue);
	while (atomic_int32_read(&s->dequeue) != ticket) {
		PAUSE;
	}
#else
	while( atomic_int32_test_and_set(&s->dequeue,0) );
#endif

	s->coreid = CORE_ID;
	s->flags = flags;
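The #if 1 branch is a ticket lock: queue hands out tickets, dequeue announces whose turn it is, which gives FIFO fairness that the test-and-set fallback in the #else branch lacks. A self-contained sketch of the same idea using C11 atomics instead of the kernel's atomic_int32 helpers, for illustration only:

#include <stdatomic.h>

typedef struct {
	atomic_uint queue;	/* next ticket to hand out */
	atomic_uint dequeue;	/* ticket currently being served */
} ticket_lock_t;

static ticket_lock_t demo_lock;	/* zero-initialized: ticket 0 is served first */

static void ticket_lock(ticket_lock_t* l)
{
	unsigned int my_ticket = atomic_fetch_add(&l->queue, 1);
	while (atomic_load(&l->dequeue) != my_ticket)
		;	/* spin; a real lock would add a pause hint */
}

static void ticket_unlock(ticket_lock_t* l)
{
	atomic_fetch_add(&l->dequeue, 1);	/* serve the next waiter */
}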
@@ -207,13 +203,13 @@ inline static int spinlock_irqsave_lock(spinlock_irqsave_t* s) {
	return 0;
}

/** @brief Unlock irqsave spinlock on exit of critical section and re-enable interrupts
/** @brief Unlock spinlock on exit of critical section and re-enable interrupts
 * @return
 * - 0 on success
 * - -EINVAL (-22) on failure
 */
inline static int spinlock_irqsave_unlock(spinlock_irqsave_t* s) {
	uint32_t flags;
	uint8_t flags;

	if (BUILTIN_EXPECT(!s, 0))
		return -EINVAL;
@@ -223,11 +219,9 @@ inline static int spinlock_irqsave_unlock(spinlock_irqsave_t* s) {
	flags = s->flags;
	s->coreid = (uint32_t) -1;
	s->flags = 0;
#if 1

	atomic_int32_inc(&s->dequeue);
#else
	atomic_int32_set(&s->dequeue,1);
#endif

	irq_nested_enable(flags);
}
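Usage sketch for the irqsave variant (not from the diff; demo_lock and the touched data are made up): it serializes cores and masks local IRQs, so the protected state may also be accessed from interrupt context.

static spinlock_irqsave_t demo_lock = SPINLOCK_IRQSAVE_INIT;

static void touch_shared_state(void)
{
	spinlock_irqsave_lock(&demo_lock);
	/* ... modify data that an IRQ handler may also touch ... */
	spinlock_irqsave_unlock(&demo_lock);
}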
@ -47,6 +47,7 @@ extern "C" {
|
|||
/** @brief System call to terminate a user level process */
|
||||
void NORETURN sys_exit(int);
|
||||
|
||||
|
||||
/** @brief Task switcher
|
||||
*
|
||||
* Timer-interrupted use of this function for task switching
|
||||
|
@ -57,6 +58,7 @@ void NORETURN sys_exit(int);
|
|||
*/
|
||||
size_t** scheduler(void);
|
||||
|
||||
|
||||
/** @brief Initialize the multitasking subsystem
|
||||
*
|
||||
* This procedure sets the current task to the
|
||||
|
@ -68,6 +70,7 @@ size_t** scheduler(void);
|
|||
*/
|
||||
int multitasking_init(void);
|
||||
|
||||
|
||||
/** @brief Clone current task with a specific entry point
|
||||
*
|
||||
* @todo Don't acquire table_lock for the whole task creation.
|
||||
|
@ -84,6 +87,7 @@ int multitasking_init(void);
|
|||
*/
|
||||
int clone_task(tid_t* id, entry_point_t ep, void* arg, uint8_t prio);
|
||||
|
||||
|
||||
/** @brief Create a task with a specific entry point
|
||||
*
|
||||
* @todo Don't acquire table_lock for the whole task creation.
|
||||
|
@ -100,6 +104,7 @@ int clone_task(tid_t* id, entry_point_t ep, void* arg, uint8_t prio);
|
|||
*/
|
||||
int create_task(tid_t* id, entry_point_t ep, void* arg, uint8_t prio, uint32_t core_id);
|
||||
|
||||
|
||||
/** @brief create a kernel-level task on the current core.
|
||||
*
|
||||
* @param id The value behind this pointer will be set to the new task's id
|
||||
|
@ -113,6 +118,7 @@ int create_task(tid_t* id, entry_point_t ep, void* arg, uint8_t prio, uint32_t c
|
|||
*/
|
||||
int create_kernel_task(tid_t* id, entry_point_t ep, void* args, uint8_t prio);
|
||||
|
||||
|
||||
/** @brief create a kernel-level task.
|
||||
*
|
||||
* @param id The value behind this pointer will be set to the new task's id
|
||||
|
@ -127,6 +133,7 @@ int create_kernel_task(tid_t* id, entry_point_t ep, void* args, uint8_t prio);
|
|||
*/
|
||||
int create_kernel_task_on_core(tid_t* id, entry_point_t ep, void* args, uint8_t prio, uint32_t core_id);
|
||||
|
||||
|
||||
/** @brief Create a user level task.
|
||||
*
|
||||
* @param id The value behind this pointer will be set to the new task's id
|
||||
|
@ -150,12 +157,14 @@ int create_user_task_on_core(tid_t* id, const char* fame, char** argv, uint8_t p
|
|||
*/
|
||||
int init_tls(void);
|
||||
|
||||
|
||||
/** @brief Cleanup function for the task termination
|
||||
*
|
||||
* On termination, the task calls this function to clean up its address space.
|
||||
*/
|
||||
void finish_task_switch(void);
|
||||
|
||||
|
||||
/** @brief determine the highest priority of all tasks, which are ready
|
||||
*
|
||||
* @return
|
||||
|
@ -164,12 +173,14 @@ void finish_task_switch(void);
|
|||
*/
|
||||
uint32_t get_highest_priority(void);
|
||||
|
||||
|
||||
/** @brief Call to rescheduling
|
||||
*
|
||||
* This is a purely assembled procedure for rescheduling
|
||||
*/
|
||||
void reschedule(void);
|
||||
|
||||
|
||||
/** @brief Wake up a blocked task
|
||||
*
|
||||
* The task's status will be changed to TASK_READY
|
||||
|
@ -180,6 +191,7 @@ void reschedule(void);
|
|||
*/
|
||||
int wakeup_task(tid_t);
|
||||
|
||||
|
||||
/** @brief Block current task
|
||||
*
|
||||
* The current task's status will be changed to TASK_BLOCKED
|
||||
|
@ -190,6 +202,7 @@ int wakeup_task(tid_t);
|
|||
*/
|
||||
int block_current_task(void);
|
||||
|
||||
|
||||
/** @brief Get a process control block
|
||||
*
|
||||
* @param id ID of the task to retrieve
|
||||
|
@ -202,6 +215,7 @@ int block_current_task(void);
|
|||
*/
|
||||
int get_task(tid_t id, task_t** task);
|
||||
|
||||
|
||||
/** @brief Block current task until timer expires
|
||||
*
|
||||
* @param deadline Clock tick, when the timer expires
|
||||
|
@ -211,17 +225,21 @@ int get_task(tid_t id, task_t** task);
|
|||
*/
|
||||
int set_timer(uint64_t deadline);
|
||||
|
||||
|
||||
/** @brief Check if a timer has expired
|
||||
*
|
||||
*/
|
||||
void check_timers(void);
|
||||
|
||||
|
||||
/** @brief Abort current task */
|
||||
void NORETURN do_abort(void);
|
||||
|
||||
|
||||
/** @brief This function shall be called by leaving kernel-level tasks */
|
||||
void NORETURN leave_kernel_task(void);
|
||||
|
||||
|
||||
/** @brief If a task with a higher priority exists, HermitCore switches to it.
|
||||
*/
|
||||
void check_scheduling(void);
|
||||
|
@@ -230,23 +248,27 @@ void check_scheduling(void);
 */
int network_shutdown(void);


#ifdef DYNAMIC_TICKS
/** @brief check, if the tick counter has to be updated
 */
void check_ticks(void);
#endif

extern volatile uint32_t go_down;

/** @brief shutdown the whole system
 */
void shutdown_system(void);


extern volatile uint32_t go_down;

static inline void check_workqueues_in_irqhandler(int irq)
{
#ifdef DYNAMIC_TICKS
	// Increment ticks
	check_ticks();
#endif

	check_timers();

	if (irq < 0) {
@@ -136,6 +136,66 @@ typedef struct {
	spinlock_irqsave_t lock;
} readyqueues_t;


static inline void task_list_remove_task(task_list_t* list, task_t* task)
{
	if (task->prev)
		task->prev->next = task->next;

	if (task->next)
		task->next->prev = task->prev;

	if (list->last == task)
		list->last = task->prev;

	if (list->first == task)
		list->first = task->next;
}


static inline void task_list_push_back(task_list_t* list, task_t* task)
{
	if(BUILTIN_EXPECT((task == NULL) || (list == NULL), 0)) {
		return;
	}

	if (list->last) {
		task->prev = list->last;
		task->next = NULL;
		list->last->next = task;
		list->last = task;
	} else {
		list->last = list->first = task;
		task->next = task->prev = NULL;
	}
}


static inline task_t* task_list_pop_front(task_list_t* list)
{
	if(BUILTIN_EXPECT((list == NULL), 0)) {
		return NULL;
	}

	task_t* task = list->first;

	if(list->first) {
		// advance list
		list->first = list->first->next;

		if(list->first) {
			// first element has no previous element
			list->first->prev = NULL;
		} else {
			// no first element => no last element either
			list->last = NULL;
		}
	}

	task->next = task->prev = NULL;
	return task;
}

#ifdef __cplusplus
}
#endif
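The three helpers implement an intrusive doubly linked list with FIFO push/pop, which the scheduler below uses for its ready and timer queues. A small usage sketch (demo names invented):

static void demo_enqueue_dequeue(task_list_t* queue, task_t* task)
{
	task_list_push_back(queue, task);		/* task becomes the new tail */

	task_t* next = task_list_pop_front(queue);	/* oldest entry comes out first */
	if (next) {
		/* ... dispatch next ... */
	}
}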
@@ -83,7 +83,9 @@ static inline void sleep(unsigned int sec) { timer_wait(sec*TIMER_FREQ); }

static inline int timer_deadline(uint32_t t) { return apic_timer_deadline(t); }

static inline void timer_disable() { apic_disable_timer(); }
static inline void timer_disable(void) { apic_disable_timer(); }

static inline int timer_is_running(void) { return apic_timer_is_running(); }

#ifdef __cplusplus
}
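The new timer_is_running() wrapper pairs naturally with timer_deadline() for one-shot programming; a hypothetical caller might only re-arm when nothing is pending (the tick count 10 is arbitrary):

static void demo_arm_oneshot(void)
{
	if (!timer_is_running())
		timer_deadline(10);	/* fire roughly 10 timer ticks from now */
}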
@@ -44,7 +44,7 @@
#include <lwip/stats.h>

//TODO: don't use one big kernel lock to communicate with all proxies
static spinlock_t lwip_lock = SPINLOCK_INIT;
static spinlock_irqsave_t lwip_lock = SPINLOCK_IRQSAVE_INIT;

extern spinlock_irqsave_t stdio_lock;
extern int32_t isle;
@ -85,7 +85,7 @@ void NORETURN sys_exit(int arg)
|
|||
{
|
||||
sys_exit_t sysargs = {__NR_exit, arg};
|
||||
|
||||
spinlock_lock(&lwip_lock);
|
||||
spinlock_irqsave_lock(&lwip_lock);
|
||||
if (libc_sd >= 0)
|
||||
{
|
||||
int s = libc_sd;
|
||||
|
@ -93,7 +93,7 @@ void NORETURN sys_exit(int arg)
|
|||
lwip_write(s, &sysargs, sizeof(sysargs));
|
||||
libc_sd = -1;
|
||||
|
||||
spinlock_unlock(&lwip_lock);
|
||||
spinlock_irqsave_unlock(&lwip_lock);
|
||||
|
||||
// switch to LwIP thread
|
||||
reschedule();
|
||||
|
@ -102,7 +102,7 @@ void NORETURN sys_exit(int arg)
|
|||
idle_poll = 0;
|
||||
} else {
|
||||
idle_poll = 0;
|
||||
spinlock_unlock(&lwip_lock);
|
||||
spinlock_irqsave_unlock(&lwip_lock);
|
||||
}
|
||||
|
||||
do_exit(arg);
|
||||
|
@ -129,9 +129,9 @@ ssize_t sys_read(int fd, char* buf, size_t len)
|
|||
return ret;
|
||||
}
|
||||
|
||||
spinlock_lock(&lwip_lock);
|
||||
spinlock_irqsave_lock(&lwip_lock);
|
||||
if (libc_sd < 0) {
|
||||
spinlock_unlock(&lwip_lock);
|
||||
spinlock_irqsave_unlock(&lwip_lock);
|
||||
return -ENOSYS;
|
||||
}
|
||||
|
||||
|
@ -147,7 +147,7 @@ ssize_t sys_read(int fd, char* buf, size_t len)
|
|||
{
|
||||
ret = lwip_read(s, buf+i, j-i);
|
||||
if (ret < 0) {
|
||||
spinlock_unlock(&lwip_lock);
|
||||
spinlock_irqsave_unlock(&lwip_lock);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -155,7 +155,7 @@ ssize_t sys_read(int fd, char* buf, size_t len)
|
|||
}
|
||||
}
|
||||
|
||||
spinlock_unlock(&lwip_lock);
|
||||
spinlock_irqsave_unlock(&lwip_lock);
|
||||
|
||||
return j;
|
||||
}
|
||||
|
@ -189,10 +189,10 @@ ssize_t sys_write(int fd, const char* buf, size_t len)
|
|||
return ret;
|
||||
}
|
||||
|
||||
spinlock_lock(&lwip_lock);
|
||||
spinlock_irqsave_lock(&lwip_lock);
|
||||
if (libc_sd < 0)
|
||||
{
|
||||
spinlock_unlock(&lwip_lock);
|
||||
spinlock_irqsave_unlock(&lwip_lock);
|
||||
|
||||
spinlock_irqsave_lock(&stdio_lock);
|
||||
for(i=0; i<len; i++)
|
||||
|
@ -210,7 +210,7 @@ ssize_t sys_write(int fd, const char* buf, size_t len)
|
|||
{
|
||||
ret = lwip_write(s, (char*)buf+i, len-i);
|
||||
if (ret < 0) {
|
||||
spinlock_unlock(&lwip_lock);
|
||||
spinlock_irqsave_unlock(&lwip_lock);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -223,7 +223,7 @@ ssize_t sys_write(int fd, const char* buf, size_t len)
|
|||
i = ret;
|
||||
} else i = len;
|
||||
|
||||
spinlock_unlock(&lwip_lock);
|
||||
spinlock_irqsave_unlock(&lwip_lock);
|
||||
|
||||
return i;
|
||||
}
|
||||
|
@ -266,7 +266,7 @@ int sys_open(const char* name, int flags, int mode)
|
|||
int s, i, ret, sysnr = __NR_open;
|
||||
size_t len;
|
||||
|
||||
spinlock_lock(&lwip_lock);
|
||||
spinlock_irqsave_lock(&lwip_lock);
|
||||
if (libc_sd < 0) {
|
||||
ret = -EINVAL;
|
||||
goto out;
|
||||
|
@ -309,7 +309,7 @@ int sys_open(const char* name, int flags, int mode)
|
|||
lwip_read(s, &ret, sizeof(ret));
|
||||
|
||||
out:
|
||||
spinlock_unlock(&lwip_lock);
|
||||
spinlock_irqsave_unlock(&lwip_lock);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
@ -333,7 +333,7 @@ int sys_close(int fd)
|
|||
return 0;
|
||||
}
|
||||
|
||||
spinlock_lock(&lwip_lock);
|
||||
spinlock_irqsave_lock(&lwip_lock);
|
||||
if (libc_sd < 0) {
|
||||
ret = 0;
|
||||
goto out;
|
||||
|
@ -346,7 +346,7 @@ int sys_close(int fd)
|
|||
lwip_read(s, &ret, sizeof(ret));
|
||||
|
||||
out:
|
||||
spinlock_unlock(&lwip_lock);
|
||||
spinlock_irqsave_unlock(&lwip_lock);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
@ -443,10 +443,10 @@ off_t sys_lseek(int fd, off_t offset, int whence)
|
|||
sys_lseek_t sysargs = {__NR_lseek, fd, offset, whence};
|
||||
int s;
|
||||
|
||||
spinlock_lock(&lwip_lock);
|
||||
spinlock_irqsave_lock(&lwip_lock);
|
||||
|
||||
if (libc_sd < 0) {
|
||||
spinlock_unlock(&lwip_lock);
|
||||
spinlock_irqsave_unlock(&lwip_lock);
|
||||
return -ENOSYS;
|
||||
}
|
||||
|
||||
|
@ -454,7 +454,7 @@ off_t sys_lseek(int fd, off_t offset, int whence)
|
|||
lwip_write(s, &sysargs, sizeof(sysargs));
|
||||
lwip_read(s, &off, sizeof(off));
|
||||
|
||||
spinlock_unlock(&lwip_lock);
|
||||
spinlock_irqsave_unlock(&lwip_lock);
|
||||
|
||||
return off;
|
||||
}
|
||||
|
|
|
@ -54,8 +54,8 @@ extern atomic_int32_t cpu_online;
|
|||
* A task's id will be its position in this array.
|
||||
*/
|
||||
static task_t task_table[MAX_TASKS] = { \
|
||||
[0] = {0, TASK_IDLE, 0, NULL, NULL, NULL, TASK_DEFAULT_FLAGS, 0, 0, 0, NULL, 0, NULL, NULL, 0, 0, 0}, \
|
||||
[1 ... MAX_TASKS-1] = {0, TASK_INVALID, 0, NULL, NULL, NULL, TASK_DEFAULT_FLAGS, 0, 0, 0, NULL, 0, NULL, NULL, 0, 0, 0}};
|
||||
[0] = {0, TASK_IDLE, 0, NULL, NULL, NULL, TASK_DEFAULT_FLAGS, 0, 0, 0, NULL, 0, NULL, NULL, 0, 0, 0, NULL, FPU_STATE_INIT}, \
|
||||
[1 ... MAX_TASKS-1] = {0, TASK_INVALID, 0, NULL, NULL, NULL, TASK_DEFAULT_FLAGS, 0, 0, 0, NULL, 0, NULL, NULL, 0, 0, 0, NULL, FPU_STATE_INIT}};
|
||||
|
||||
static spinlock_irqsave_t table_lock = SPINLOCK_IRQSAVE_INIT;
|
||||
|
||||
|
@ -68,28 +68,143 @@ static readyqueues_t readyqueues[1] = {[0] = {task_table+0, NULL, 0, 0, 0, {[0 .
|
|||
|
||||
DEFINE_PER_CORE(task_t*, current_task, task_table+0);
|
||||
DEFINE_PER_CORE(char*, kernel_stack, NULL);
|
||||
|
||||
#if MAX_CORES > 1
|
||||
DEFINE_PER_CORE(uint32_t, __core_id, 0);
|
||||
#endif
|
||||
|
||||
extern const void boot_stack;
|
||||
extern const void boot_ist;
|
||||
|
||||
/** @brief helper function for the assembly code to determine the current task
|
||||
* @return Pointer to the task_t structure of current task
|
||||
*/
|
||||
task_t* get_current_task(void)
|
||||
|
||||
static void update_timer(task_t* first)
|
||||
{
|
||||
return per_core(current_task);
|
||||
if(first) {
|
||||
if(first->timeout > get_clock_tick()) {
|
||||
timer_deadline((uint32_t) (first->timeout - get_clock_tick()));
|
||||
} else {
|
||||
// workaround: start timer so new head will be serviced
|
||||
timer_deadline(1);
|
||||
}
|
||||
} else {
|
||||
// prevent spurious interrupts
|
||||
timer_disable();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
static void timer_queue_remove(uint32_t core_id, task_t* task)
|
||||
{
|
||||
if(BUILTIN_EXPECT(!task, 0)) {
|
||||
return;
|
||||
}
|
||||
|
||||
task_list_t* timer_queue = &readyqueues[core_id].timers;
|
||||
|
||||
#ifdef DYNAMIC_TICKS
|
||||
// if task is first in timer queue, we need to update the oneshot
|
||||
// timer for the next task
|
||||
if(timer_queue->first == task) {
|
||||
update_timer(task->next);
|
||||
}
|
||||
#endif
|
||||
|
||||
task_list_remove_task(timer_queue, task);
|
||||
}
|
||||
|
||||
|
||||
static void timer_queue_push(uint32_t core_id, task_t* task)
|
||||
{
|
||||
task_list_t* timer_queue = &readyqueues[core_id].timers;
|
||||
|
||||
spinlock_irqsave_lock(&readyqueues[core_id].lock);
|
||||
|
||||
task_t* first = timer_queue->first;
|
||||
|
||||
if(!first) {
|
||||
timer_queue->first = timer_queue->last = task;
|
||||
task->next = task->prev = NULL;
|
||||
|
||||
#ifdef DYNAMIC_TICKS
|
||||
update_timer(task);
|
||||
#endif
|
||||
} else {
|
||||
// lookup position where to insert task
|
||||
task_t* tmp = first;
|
||||
while(tmp && (task->timeout >= tmp->timeout))
|
||||
tmp = tmp->next;
|
||||
|
||||
if(!tmp) {
|
||||
// insert at the end of queue
|
||||
task->next = NULL;
|
||||
task->prev = timer_queue->last;
|
||||
|
||||
// there has to be a last element because there is also a first one
|
||||
timer_queue->last->next = task;
|
||||
timer_queue->last = task;
|
||||
} else {
|
||||
task->next = tmp;
|
||||
task->prev = tmp->prev;
|
||||
tmp->prev = task;
|
||||
|
||||
if(task->prev)
|
||||
task->prev->next = task;
|
||||
|
||||
if(timer_queue->first == tmp) {
|
||||
timer_queue->first = task;
|
||||
|
||||
#ifdef DYNAMIC_TICKS
|
||||
update_timer(task);
|
||||
#endif
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
spinlock_irqsave_unlock(&readyqueues[core_id].lock);
|
||||
}
|
||||
|
||||
|
||||
static void readyqueues_push_back(uint32_t core_id, task_t* task)
|
||||
{
|
||||
// idle task (prio=0) doesn't have a queue
|
||||
task_list_t* readyqueue = &readyqueues[core_id].queue[task->prio - 1];
|
||||
|
||||
task_list_push_back(readyqueue, task);
|
||||
|
||||
// update priority bitmap
|
||||
readyqueues[core_id].prio_bitmap |= (1 << task->prio);
|
||||
|
||||
// increase the number of ready tasks
|
||||
readyqueues[core_id].nr_tasks++;
|
||||
}
|
||||
|
||||
|
||||
static void readyqueues_remove(uint32_t core_id, task_t* task)
|
||||
{
|
||||
// idle task (prio=0) doesn't have a queue
|
||||
task_list_t* readyqueue = &readyqueues[core_id].queue[task->prio - 1];
|
||||
|
||||
task_list_remove_task(readyqueue, task);
|
||||
|
||||
// no valid task in queue => update priority bitmap
|
||||
if (readyqueue->first == NULL)
|
||||
readyqueues[core_id].prio_bitmap &= ~(1 << task->prio);
|
||||
|
||||
// reduce the number of ready tasks
|
||||
readyqueues[core_id].nr_tasks--;
|
||||
}
|
||||
|
||||
|
||||
void check_scheduling(void)
|
||||
{
|
||||
if (!is_irq_enabled())
|
||||
return;
|
||||
if (msb(readyqueues[CORE_ID].prio_bitmap) > per_core(current_task)->prio)
|
||||
|
||||
if (get_highest_priority() > per_core(current_task)->prio)
|
||||
reschedule();
|
||||
}
|
||||
|
||||
|
||||
uint32_t get_highest_priority(void)
|
||||
{
|
||||
uint32_t prio = msb(readyqueues[CORE_ID].prio_bitmap);
|
||||
|
@ -99,6 +214,7 @@ uint32_t get_highest_priority(void)
|
|||
return prio;
|
||||
}
|
||||
|
||||
|
||||
int multitasking_init(void)
|
||||
{
|
||||
uint32_t core_id = CORE_ID;
|
||||
|
@ -120,9 +236,12 @@ int multitasking_init(void)
|
|||
return 0;
|
||||
}
|
||||
|
||||
|
||||
/* interrupt handler to save / restore the FPU context */
|
||||
void fpu_handler(struct state *s)
|
||||
{
|
||||
(void) s;
|
||||
|
||||
task_t* task = per_core(current_task);
|
||||
uint32_t core_id = CORE_ID;
|
||||
|
||||
|
@ -150,6 +269,7 @@ void fpu_handler(struct state *s)
|
|||
restore_fpu_state(&task->fpu);
|
||||
}
|
||||
|
||||
|
||||
int set_idle_task(void)
|
||||
{
|
||||
uint32_t i, core_id = CORE_ID;
|
||||
|
@ -182,6 +302,7 @@ int set_idle_task(void)
|
|||
return ret;
|
||||
}
|
||||
|
||||
|
||||
int init_tls(void)
|
||||
{
|
||||
task_t* curr_task = per_core(current_task);
|
||||
|
@ -209,15 +330,17 @@ int init_tls(void)
|
|||
return 0;
|
||||
}
|
||||
|
||||
|
||||
void finish_task_switch(void)
|
||||
{
|
||||
task_t* old;
|
||||
uint8_t prio;
|
||||
const uint32_t core_id = CORE_ID;
|
||||
|
||||
spinlock_irqsave_lock(&readyqueues[core_id].lock);
|
||||
|
||||
if ((old = readyqueues[core_id].old_task) != NULL) {
|
||||
readyqueues[core_id].old_task = NULL;
|
||||
|
||||
if (old->status == TASK_FINISHED) {
|
||||
/* cleanup task */
|
||||
if (old->stack) {
|
||||
|
@ -237,7 +360,6 @@ void finish_task_switch(void)
|
|||
}
|
||||
|
||||
old->last_stack_pointer = NULL;
|
||||
readyqueues[core_id].old_task = NULL;
|
||||
|
||||
if (readyqueues[core_id].fpu_owner == old->id)
|
||||
readyqueues[core_id].fpu_owner = 0;
|
||||
|
@ -245,26 +367,15 @@ void finish_task_switch(void)
|
|||
/* signals that this task can be reused */
|
||||
old->status = TASK_INVALID;
|
||||
} else {
|
||||
prio = old->prio;
|
||||
if (!readyqueues[core_id].queue[prio-1].first) {
|
||||
old->next = old->prev = NULL;
|
||||
readyqueues[core_id].queue[prio-1].first = readyqueues[core_id].queue[prio-1].last = old;
|
||||
} else {
|
||||
old->next = NULL;
|
||||
old->prev = readyqueues[core_id].queue[prio-1].last;
|
||||
readyqueues[core_id].queue[prio-1].last->next = old;
|
||||
readyqueues[core_id].queue[prio-1].last = old;
|
||||
}
|
||||
readyqueues[core_id].old_task = NULL;
|
||||
readyqueues[core_id].prio_bitmap |= (1 << prio);
|
||||
// re-enqueue old task
|
||||
readyqueues_push_back(core_id, old);
|
||||
}
|
||||
}
|
||||
|
||||
spinlock_irqsave_unlock(&readyqueues[core_id].lock);
|
||||
}
|
||||
|
||||
/** @brief A procedure to be called by
|
||||
* procedures which are called by exiting tasks. */
|
||||
|
||||
void NORETURN do_exit(int arg)
|
||||
{
|
||||
task_t* curr_task = per_core(current_task);
|
||||
|
@ -298,7 +409,7 @@ void NORETURN do_exit(int arg)
|
|||
}
|
||||
}
|
||||
|
||||
/** @brief A procedure to be called by kernel tasks */
|
||||
|
||||
void NORETURN leave_kernel_task(void) {
|
||||
int result;
|
||||
|
||||
|
@ -306,11 +417,12 @@ void NORETURN leave_kernel_task(void) {
|
|||
do_exit(result);
|
||||
}
|
||||
|
||||
/** @brief Aborting a task is like exiting it with result -1 */
|
||||
|
||||
void NORETURN do_abort(void) {
|
||||
do_exit(-1);
|
||||
}
|
||||
|
||||
|
||||
static uint32_t get_next_core_id(void)
|
||||
{
|
||||
uint32_t i;
|
||||
|
@ -334,6 +446,7 @@ static uint32_t get_next_core_id(void)
|
|||
return core_id;
|
||||
}
|
||||
|
||||
|
||||
int clone_task(tid_t* id, entry_point_t ep, void* arg, uint8_t prio)
|
||||
{
|
||||
int ret = -EINVAL;
|
||||
|
@ -434,6 +547,7 @@ out:
|
|||
return ret;
|
||||
}
|
||||
|
||||
|
||||
int create_task(tid_t* id, entry_point_t ep, void* arg, uint8_t prio, uint32_t core_id)
|
||||
{
|
||||
int ret = -ENOMEM;
|
||||
|
@ -537,6 +651,7 @@ out:
|
|||
return ret;
|
||||
}
|
||||
|
||||
|
||||
int create_kernel_task_on_core(tid_t* id, entry_point_t ep, void* args, uint8_t prio, uint32_t core_id)
|
||||
{
|
||||
if (prio > MAX_PRIO)
|
||||
|
@ -545,6 +660,7 @@ int create_kernel_task_on_core(tid_t* id, entry_point_t ep, void* args, uint8_t
|
|||
return create_task(id, ep, args, prio, core_id);
|
||||
}
|
||||
|
||||
|
||||
int create_kernel_task(tid_t* id, entry_point_t ep, void* args, uint8_t prio)
|
||||
{
|
||||
if (prio > MAX_PRIO)
|
||||
|
@ -553,23 +669,17 @@ int create_kernel_task(tid_t* id, entry_point_t ep, void* args, uint8_t prio)
|
|||
return create_task(id, ep, args, prio, CORE_ID);
|
||||
}
|
||||
|
||||
/** @brief Wakeup a blocked task
|
||||
* @param id The task's tid_t structure
|
||||
* @return
|
||||
* - 0 on success
|
||||
* - -EINVAL (-22) on failure
|
||||
*/
|
||||
|
||||
int wakeup_task(tid_t id)
|
||||
{
|
||||
task_t* task;
|
||||
uint32_t core_id, prio;
|
||||
uint32_t core_id;
|
||||
int ret = -EINVAL;
|
||||
uint8_t flags;
|
||||
|
||||
flags = irq_nested_disable();
|
||||
|
||||
task = task_table + id;
|
||||
prio = task->prio;
|
||||
task = &task_table[id];
|
||||
core_id = task->last_core;
|
||||
|
||||
if (task->status == TASK_BLOCKED) {
|
||||
|
@ -577,56 +687,18 @@ int wakeup_task(tid_t id)
|
|||
ret = 0;
|
||||
|
||||
spinlock_irqsave_lock(&readyqueues[core_id].lock);
|
||||
// increase the number of ready tasks
|
||||
readyqueues[core_id].nr_tasks++;
|
||||
|
||||
// do we need to remove from timer queue?
|
||||
// if task is in timer queue, remove it
|
||||
if (task->flags & TASK_TIMER) {
|
||||
task->flags &= ~TASK_TIMER;
|
||||
if (task->prev)
|
||||
task->prev->next = task->next;
|
||||
if (task->next)
|
||||
task->next->prev = task->prev;
|
||||
if (readyqueues[core_id].timers.first == task) {
|
||||
readyqueues[core_id].timers.first = task->next;
|
||||
|
||||
#ifdef DYNAMIC_TICKS
|
||||
const task_t* first = readyqueues[core_id].timers.first;
|
||||
if(first) {
|
||||
if(first->timeout > get_clock_tick()) {
|
||||
timer_deadline(first->timeout - get_clock_tick());
|
||||
} else {
|
||||
// workaround: start timer so new head will be serviced
|
||||
timer_deadline(1);
|
||||
}
|
||||
} else {
|
||||
// prevent spurious interrupts
|
||||
timer_disable();
|
||||
}
|
||||
#endif
|
||||
}
|
||||
if (readyqueues[core_id].timers.last == task)
|
||||
readyqueues[core_id].timers.last = task->prev;
|
||||
timer_queue_remove(core_id, task);
|
||||
}
|
||||
|
||||
// add task to the runqueue
|
||||
if (!readyqueues[core_id].queue[prio-1].last) {
|
||||
readyqueues[core_id].queue[prio-1].last = readyqueues[core_id].queue[prio-1].first = task;
|
||||
task->next = task->prev = NULL;
|
||||
readyqueues[core_id].prio_bitmap |= (1 << prio);
|
||||
} else {
|
||||
task->prev = readyqueues[core_id].queue[prio-1].last;
|
||||
task->next = NULL;
|
||||
readyqueues[core_id].queue[prio-1].last->next = task;
|
||||
readyqueues[core_id].queue[prio-1].last = task;
|
||||
}
|
||||
// add task to the ready queue
|
||||
readyqueues_push_back(core_id, task);
|
||||
|
||||
spinlock_irqsave_unlock(&readyqueues[core_id].lock);
|
||||
|
||||
#if 0 //def DYNAMIC_TICKS
|
||||
// send IPI to be sure that the scheduler recognizes the new task
|
||||
if (core_id != CORE_ID)
|
||||
apic_send_ipi(core_id, 121);
|
||||
#endif
|
||||
}
|
||||
|
||||
irq_nested_enable(flags);
|
||||
|
@ -634,54 +706,30 @@ int wakeup_task(tid_t id)
|
|||
return ret;
|
||||
}
|
||||
|
||||
/** @brief Block current task
|
||||
*
|
||||
* The current task's status will be changed to TASK_BLOCKED
|
||||
*
|
||||
* @return
|
||||
* - 0 on success
|
||||
* - -EINVAL (-22) on failure
|
||||
*/
|
||||
int block_current_task(void)
|
||||
|
||||
int block_task(tid_t id)
|
||||
{
|
||||
task_t* curr_task;
|
||||
tid_t id;
|
||||
uint32_t prio, core_id;
|
||||
task_t* task;
|
||||
uint32_t core_id;
|
||||
int ret = -EINVAL;
|
||||
uint8_t flags;
|
||||
|
||||
flags = irq_nested_disable();
|
||||
|
||||
curr_task = per_core(current_task);
|
||||
id = curr_task->id;
|
||||
prio = curr_task->prio;
|
||||
core_id = CORE_ID;
|
||||
task = &task_table[id];
|
||||
core_id = task->last_core;
|
||||
|
||||
if (task_table[id].status == TASK_RUNNING) {
|
||||
task_table[id].status = TASK_BLOCKED;
|
||||
ret = 0;
|
||||
if (task->status == TASK_RUNNING) {
|
||||
task->status = TASK_BLOCKED;
|
||||
|
||||
spinlock_irqsave_lock(&readyqueues[core_id].lock);
|
||||
// reduce the number of ready tasks
|
||||
readyqueues[core_id].nr_tasks--;
|
||||
|
||||
// remove task from queue
|
||||
if (task_table[id].prev)
|
||||
task_table[id].prev->next = task_table[id].next;
|
||||
if (task_table[id].next)
|
||||
task_table[id].next->prev = task_table[id].prev;
|
||||
if (readyqueues[core_id].queue[prio-1].first == task_table+id)
|
||||
readyqueues[core_id].queue[prio-1].first = task_table[id].next;
|
||||
if (readyqueues[core_id].queue[prio-1].last == task_table+id) {
|
||||
readyqueues[core_id].queue[prio-1].last = task_table[id].prev;
|
||||
if (!readyqueues[core_id].queue[prio-1].last)
|
||||
readyqueues[core_id].queue[prio-1].last = readyqueues[core_id].queue[prio-1].first;
|
||||
}
|
||||
// remove task from ready queue
|
||||
readyqueues_remove(core_id, task);
|
||||
|
||||
// No valid task in queue => update prio_bitmap
|
||||
if (!readyqueues[core_id].queue[prio-1].first)
|
||||
readyqueues[core_id].prio_bitmap &= ~(1 << prio);
|
||||
spinlock_irqsave_unlock(&readyqueues[core_id].lock);
|
||||
|
||||
ret = 0;
|
||||
}
|
||||
|
||||
irq_nested_enable(flags);
|
||||
|
@ -689,148 +737,71 @@ int block_current_task(void)
|
|||
return ret;
|
||||
}
|
||||
|
||||
|
||||
int block_current_task(void)
|
||||
{
|
||||
return block_task(per_core(current_task)->id);
|
||||
}
|
||||
|
||||
|
||||
int set_timer(uint64_t deadline)
|
||||
{
|
||||
task_t* curr_task;
|
||||
task_t* tmp;
|
||||
uint32_t core_id, prio;
|
||||
uint32_t flags;
|
||||
uint32_t core_id;
|
||||
uint8_t flags;
|
||||
int ret = -EINVAL;
|
||||
|
||||
flags = irq_nested_disable();
|
||||
|
||||
curr_task = per_core(current_task);
|
||||
prio = curr_task->prio;
|
||||
core_id = CORE_ID;
|
||||
|
||||
if (curr_task->status == TASK_RUNNING) {
|
||||
curr_task->status = TASK_BLOCKED;
|
||||
curr_task->timeout = deadline;
|
||||
// blocks task and removes from ready queue
|
||||
block_task(curr_task->id);
|
||||
|
||||
curr_task->flags |= TASK_TIMER;
|
||||
curr_task->timeout = deadline;
|
||||
|
||||
timer_queue_push(core_id, curr_task);
|
||||
|
||||
ret = 0;
|
||||
|
||||
spinlock_irqsave_lock(&readyqueues[core_id].lock);
|
||||
|
||||
// reduce the number of ready tasks
|
||||
readyqueues[core_id].nr_tasks--;
|
||||
|
||||
// remove task from queue
|
||||
if (curr_task->prev)
|
||||
curr_task->prev->next = curr_task->next;
|
||||
if (curr_task->next)
|
||||
curr_task->next->prev = curr_task->prev;
|
||||
if (readyqueues[core_id].queue[prio-1].first == curr_task)
|
||||
readyqueues[core_id].queue[prio-1].first = curr_task->next;
|
||||
if (readyqueues[core_id].queue[prio-1].last == curr_task) {
|
||||
readyqueues[core_id].queue[prio-1].last = curr_task->prev;
|
||||
if (!readyqueues[core_id].queue[prio-1].last)
|
||||
readyqueues[core_id].queue[prio-1].last = readyqueues[core_id].queue[prio-1].first;
|
||||
}
|
||||
|
||||
// No valid task in queue => update prio_bitmap
|
||||
if (!readyqueues[core_id].queue[prio-1].first)
|
||||
readyqueues[core_id].prio_bitmap &= ~(1 << prio);
|
||||
|
||||
// add task to the timer queue
|
||||
tmp = readyqueues[core_id].timers.first;
|
||||
if (!tmp) {
|
||||
readyqueues[core_id].timers.first = readyqueues[core_id].timers.last = curr_task;
|
||||
curr_task->prev = curr_task->next = NULL;
|
||||
#ifdef DYNAMIC_TICKS
|
||||
timer_deadline(deadline-get_clock_tick());
|
||||
#endif
|
||||
} else {
|
||||
while(tmp && (deadline >= tmp->timeout))
|
||||
tmp = tmp->next;
|
||||
|
||||
if (!tmp) {
|
||||
curr_task->next = NULL;
|
||||
curr_task->prev = readyqueues[core_id].timers.last;
|
||||
if (readyqueues[core_id].timers.last)
|
||||
readyqueues[core_id].timers.last->next = curr_task;
|
||||
readyqueues[core_id].timers.last = curr_task;
|
||||
// obsolete lines...
|
||||
//if (!readyqueues[core_id].timers.first)
|
||||
// readyqueues[core_id].timers.first = curr_task;
|
||||
} else {
|
||||
curr_task->prev = tmp->prev;
|
||||
curr_task->next = tmp;
|
||||
tmp->prev = curr_task;
|
||||
if (curr_task->prev)
|
||||
curr_task->prev->next = curr_task;
|
||||
if (readyqueues[core_id].timers.first == tmp) {
|
||||
readyqueues[core_id].timers.first = curr_task;
|
||||
#ifdef DYNAMIC_TICKS
|
||||
timer_deadline(deadline-get_clock_tick());
|
||||
#endif
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
spinlock_irqsave_unlock(&readyqueues[core_id].lock);
|
||||
} else kprintf("Task is already blocked. No timer will be set!\n");
|
||||
} else {
|
||||
kprintf("Task is already blocked. No timer will be set!\n");
|
||||
}
|
||||
|
||||
irq_nested_enable(flags);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
||||
void check_timers(void)
|
||||
{
|
||||
uint32_t core_id = CORE_ID;
|
||||
uint32_t prio;
|
||||
uint64_t current_tick;
|
||||
readyqueues_t* readyqueue = &readyqueues[CORE_ID];
|
||||
spinlock_irqsave_lock(&readyqueue->lock);
|
||||
|
||||
spinlock_irqsave_lock(&readyqueues[core_id].lock);
|
||||
// since IRQs are disabled, get_clock_tick() won't increase here
|
||||
const uint64_t current_tick = get_clock_tick();
|
||||
|
||||
// check timers
|
||||
current_tick = get_clock_tick();
|
||||
while (readyqueues[core_id].timers.first && readyqueues[core_id].timers.first->timeout <= current_tick)
|
||||
// wakeup tasks whose deadline has expired
|
||||
task_t* task;
|
||||
while ((task = readyqueue->timers.first) && (task->timeout <= current_tick))
|
||||
{
|
||||
task_t* task = readyqueues[core_id].timers.first;
|
||||
|
||||
// remove timer from queue
|
||||
readyqueues[core_id].timers.first = readyqueues[core_id].timers.first->next;
|
||||
if (readyqueues[core_id].timers.first) {
|
||||
readyqueues[core_id].timers.first->prev = NULL;
|
||||
#ifdef DYNAMIC_TICKS
|
||||
if (readyqueues[core_id].timers.first->timeout > get_clock_tick())
|
||||
timer_deadline(readyqueues[core_id].timers.first->timeout-current_tick);
|
||||
#endif
|
||||
} else readyqueues[core_id].timers.last = NULL;
|
||||
task->flags &= ~TASK_TIMER;
|
||||
|
||||
// wakeup task
|
||||
if (task->status == TASK_BLOCKED) {
|
||||
task->status = TASK_READY;
|
||||
prio = task->prio;
|
||||
|
||||
// increase the number of ready tasks
|
||||
readyqueues[core_id].nr_tasks++;
|
||||
|
||||
// add task to the runqueue
|
||||
if (!readyqueues[core_id].queue[prio-1].first) {
|
||||
readyqueues[core_id].queue[prio-1].last = readyqueues[core_id].queue[prio-1].first = task;
|
||||
task->next = task->prev = NULL;
|
||||
readyqueues[core_id].prio_bitmap |= (1 << prio);
|
||||
} else {
|
||||
task->prev = readyqueues[core_id].queue[prio-1].last;
|
||||
task->next = NULL;
|
||||
readyqueues[core_id].queue[prio-1].last->next = task;
|
||||
readyqueues[core_id].queue[prio-1].last = task;
|
||||
}
|
||||
}
|
||||
// pops task from timer queue, so next iteration has new first element
|
||||
wakeup_task(task->id);
|
||||
}
|
||||
|
||||
spinlock_irqsave_unlock(&readyqueues[core_id].lock);
|
||||
spinlock_irqsave_unlock(&readyqueue->lock);
|
||||
}
|
||||
|
||||
|
||||
size_t** scheduler(void)
|
||||
{
|
||||
task_t* orig_task;
|
||||
task_t* curr_task;
|
||||
const int32_t core_id = CORE_ID;
|
||||
uint32_t prio;
|
||||
const uint32_t core_id = CORE_ID;
|
||||
uint64_t prio;
|
||||
|
||||
orig_task = curr_task = per_core(current_task);
|
||||
curr_task->last_core = core_id;
|
||||
|
@ -850,8 +821,12 @@ size_t** scheduler(void)
|
|||
set_per_core(current_task, curr_task);
|
||||
}
|
||||
|
||||
prio = msb(readyqueues[core_id].prio_bitmap); // determines highest priority
|
||||
if (prio > MAX_PRIO) {
|
||||
// determine highest priority
|
||||
prio = msb(readyqueues[core_id].prio_bitmap);
|
||||
|
||||
const int readyqueue_empty = prio > MAX_PRIO;
|
||||
if (readyqueue_empty) {
|
||||
|
||||
if ((curr_task->status == TASK_RUNNING) || (curr_task->status == TASK_IDLE))
|
||||
goto get_task_out;
|
||||
curr_task = readyqueues[core_id].idle;
|
||||
|
@ -861,26 +836,33 @@ size_t** scheduler(void)
|
|||
if ((curr_task->prio > prio) && (curr_task->status == TASK_RUNNING))
|
||||
goto get_task_out;
|
||||
|
||||
// mark current task for later cleanup by finish_task_switch()
|
||||
if (curr_task->status == TASK_RUNNING) {
|
||||
curr_task->status = TASK_READY;
|
||||
readyqueues[core_id].old_task = curr_task;
|
||||
}
|
||||
|
||||
curr_task = readyqueues[core_id].queue[prio-1].first;
|
||||
set_per_core(current_task, curr_task);
|
||||
if (BUILTIN_EXPECT(curr_task->status == TASK_INVALID, 0)) {
|
||||
kprintf("Upps!!!!!!! Got invalid task %d, orig task %d\n", curr_task->id, orig_task->id);
|
||||
}
|
||||
curr_task->status = TASK_RUNNING;
|
||||
// get new task from its ready queue
|
||||
curr_task = task_list_pop_front(&readyqueues[core_id].queue[prio-1]);
|
||||
|
||||
// remove new task from queue
|
||||
// by the way, priority 0 is only used by the idle task and doesn't need own queue
|
||||
readyqueues[core_id].queue[prio-1].first = curr_task->next;
|
||||
if (!curr_task->next) {
|
||||
readyqueues[core_id].queue[prio-1].last = NULL;
|
||||
if(BUILTIN_EXPECT(curr_task == NULL, 0)) {
|
||||
kprintf("Kernel panic: No task in readyqueue\n");
|
||||
while(1);
|
||||
}
|
||||
if (BUILTIN_EXPECT(curr_task->status == TASK_INVALID, 0)) {
|
||||
kprintf("Kernel panic: Got invalid task %d, orig task %d\n",
|
||||
curr_task->id, orig_task->id);
|
||||
while(1);
|
||||
}
|
||||
|
||||
// if we removed the last task from queue, update priority bitmap
|
||||
if(readyqueues[core_id].queue[prio-1].first == NULL) {
|
||||
readyqueues[core_id].prio_bitmap &= ~(1 << prio);
|
||||
}
|
||||
curr_task->next = curr_task->prev = NULL;
|
||||
|
||||
// finally make it the new current task
|
||||
curr_task->status = TASK_RUNNING;
|
||||
set_per_core(current_task, curr_task);
|
||||
}
|
||||
|
||||
get_task_out:
|
||||
|
@ -915,6 +897,7 @@ int get_task(tid_t id, task_t** task)
|
|||
return 0;
|
||||
}
|
||||
|
||||
|
||||
void reschedule(void)
|
||||
{
|
||||
size_t** stack;
|
||||
|
|
|
@ -38,7 +38,7 @@
|
|||
/// A linked list for each binary size exponent
|
||||
static buddy_t* buddy_lists[BUDDY_LISTS] = { [0 ... BUDDY_LISTS-1] = NULL };
|
||||
/// Lock for the buddy lists
|
||||
static spinlock_t buddy_lock = SPINLOCK_INIT;
|
||||
static spinlock_irqsave_t buddy_lock = SPINLOCK_IRQSAVE_INIT;
|
||||
|
||||
/** @brief Check if larger free buddies are available */
|
||||
static inline int buddy_large_avail(uint8_t exp)
|
||||
|
@ -66,7 +66,7 @@ static inline int buddy_exp(size_t sz)
|
|||
/** @brief Get a free buddy by potentially splitting a larger one */
|
||||
static buddy_t* buddy_get(int exp)
|
||||
{
|
||||
spinlock_lock(&buddy_lock);
|
||||
spinlock_irqsave_lock(&buddy_lock);
|
||||
buddy_t** list = &buddy_lists[exp-BUDDY_MIN];
|
||||
buddy_t* buddy = *list;
|
||||
buddy_t* split;
|
||||
|
@ -92,7 +92,7 @@ static buddy_t* buddy_get(int exp)
|
|||
}
|
||||
|
||||
out:
|
||||
spinlock_unlock(&buddy_lock);
|
||||
spinlock_irqsave_unlock(&buddy_lock);
|
||||
|
||||
return buddy;
|
||||
}
|
||||
|
@ -103,11 +103,11 @@ out:
|
|||
*/
|
||||
static void buddy_put(buddy_t* buddy)
|
||||
{
|
||||
spinlock_lock(&buddy_lock);
|
||||
spinlock_irqsave_lock(&buddy_lock);
|
||||
buddy_t** list = &buddy_lists[buddy->prefix.exponent-BUDDY_MIN];
|
||||
buddy->next = *list;
|
||||
*list = buddy;
|
||||
spinlock_unlock(&buddy_lock);
|
||||
spinlock_irqsave_unlock(&buddy_lock);
|
||||
}
|
||||
|
||||
void buddy_dump(void)
|
||||
|
|
|
@ -48,7 +48,7 @@ extern const void kernel_end;
|
|||
*/
|
||||
static vma_t vma_boot = { VMA_MIN, VMA_MIN, VMA_HEAP };
|
||||
static vma_t* vma_list = &vma_boot;
|
||||
static spinlock_t vma_lock = SPINLOCK_INIT;
|
||||
static spinlock_irqsave_t vma_lock = SPINLOCK_IRQSAVE_INIT;
|
||||
|
||||
// TODO: we might move the architecture specific VMA regions to a
|
||||
// seperate function arch_vma_init()
|
||||
|
@ -86,7 +86,7 @@ out:
|
|||
|
||||
size_t vma_alloc(size_t size, uint32_t flags)
|
||||
{
|
||||
spinlock_t* lock = &vma_lock;
|
||||
spinlock_irqsave_t* lock = &vma_lock;
|
||||
vma_t** list = &vma_list;
|
||||
|
||||
//kprintf("vma_alloc: size = %#lx, flags = %#x\n", size, flags);
|
||||
|
@ -98,7 +98,7 @@ size_t vma_alloc(size_t size, uint32_t flags)
|
|||
size_t base = VMA_MIN;
|
||||
size_t limit = VMA_MAX;
|
||||
|
||||
spinlock_lock(lock);
|
||||
spinlock_irqsave_lock(lock);
|
||||
|
||||
// first fit search for free memory area
|
||||
vma_t* pred = NULL; // vma before current gap
|
||||
|
@ -115,7 +115,7 @@ size_t vma_alloc(size_t size, uint32_t flags)
|
|||
} while (pred || succ);
|
||||
|
||||
fail:
|
||||
spinlock_unlock(lock); // we were unlucky to find a free gap
|
||||
spinlock_irqsave_unlock(lock); // we were unlucky to find a free gap
|
||||
|
||||
return 0;
|
||||
|
||||
|
@ -143,14 +143,14 @@ found:
|
|||
*list = new;
|
||||
}
|
||||
|
||||
spinlock_unlock(lock);
|
||||
spinlock_irqsave_unlock(lock);
|
||||
|
||||
return start;
|
||||
}
|
||||
|
||||
int vma_free(size_t start, size_t end)
|
||||
{
|
||||
spinlock_t* lock = &vma_lock;
|
||||
spinlock_irqsave_t* lock = &vma_lock;
|
||||
vma_t* vma;
|
||||
vma_t** list = &vma_list;
|
||||
|
||||
|
@ -159,7 +159,7 @@ int vma_free(size_t start, size_t end)
|
|||
if (BUILTIN_EXPECT(start >= end, 0))
|
||||
return -EINVAL;
|
||||
|
||||
spinlock_lock(lock);
|
||||
spinlock_irqsave_lock(lock);
|
||||
|
||||
// search vma
|
||||
vma = *list;
|
||||
|
@ -169,7 +169,7 @@ int vma_free(size_t start, size_t end)
|
|||
}
|
||||
|
||||
if (BUILTIN_EXPECT(!vma, 0)) {
|
||||
spinlock_unlock(lock);
|
||||
spinlock_irqsave_unlock(lock);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
|
@ -190,7 +190,7 @@ int vma_free(size_t start, size_t end)
|
|||
else {
|
||||
vma_t* new = kmalloc(sizeof(vma_t));
|
||||
if (BUILTIN_EXPECT(!new, 0)) {
|
||||
spinlock_unlock(lock);
|
||||
spinlock_irqsave_unlock(lock);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
|
@ -205,14 +205,14 @@ int vma_free(size_t start, size_t end)
|
|||
new->prev = vma;
|
||||
}
|
||||
|
||||
spinlock_unlock(lock);
|
||||
spinlock_irqsave_unlock(lock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int vma_add(size_t start, size_t end, uint32_t flags)
|
||||
{
|
||||
spinlock_t* lock = &vma_lock;
|
||||
spinlock_irqsave_t* lock = &vma_lock;
|
||||
vma_t** list = &vma_list;
|
||||
int ret = 0;
|
||||
|
||||
|
@ -221,7 +221,7 @@ int vma_add(size_t start, size_t end, uint32_t flags)
|
|||
|
||||
//kprintf("vma_add: start = %#lx, end = %#lx, flags = %#x\n", start, end, flags);
|
||||
|
||||
spinlock_lock(lock);
|
||||
spinlock_irqsave_lock(lock);
|
||||
|
||||
// search gap
|
||||
vma_t* pred = NULL;
|
||||
|
@ -267,7 +267,7 @@ int vma_add(size_t start, size_t end, uint32_t flags)
|
|||
}
|
||||
|
||||
fail:
|
||||
spinlock_unlock(lock);
|
||||
spinlock_irqsave_unlock(lock);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
@ -286,7 +286,7 @@ void vma_dump(void)
|
|||
}
|
||||
|
||||
kputs("VMAs:\n");
|
||||
spinlock_lock(&vma_lock);
|
||||
spinlock_irqsave_lock(&vma_lock);
|
||||
print_vma(&vma_boot);
|
||||
spinlock_unlock(&vma_lock);
|
||||
spinlock_irqsave_unlock(&vma_lock);
|
||||
}