Mirror of https://github.com/hermitcore/libhermit.git (synced 2025-03-09 00:00:03 +01:00)
prepare kernel to support SMP
- only startup code is missing

parent a48835c28e
commit f781f4923b

17 changed files with 226 additions and 145 deletions
@@ -134,6 +134,10 @@ struct state {
     uint64_t ss;
 };
 
+uint32_t apic_cpu_id(void);
+
+#define smp_id apic_cpu_id
+
 #ifdef __cplusplus
 }
 #endif
@@ -177,6 +177,9 @@ uint32_t apic_cpu_id(void)
     if (apic_is_enabled())
         return ((lapic_read(APIC_ID)) >> 24);
 
+    if (boot_processor >= 0)
+        return boot_processor;
+
     return 0;
 }
 
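In xAPIC mode the APIC ID sits in bits 31:24 of the local APIC ID register, which is why apic_cpu_id() shifts the register value right by 24. A tiny illustration with a faked register value (the real code reads it via lapic_read(APIC_ID)):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* as if read via lapic_read(APIC_ID); the value is made up for the demo */
    uint32_t apic_id_reg = 0x03000000;

    /* bits 31:24 carry the APIC ID of the executing core */
    printf("APIC ID = %u\n", apic_id_reg >> 24);   /* prints 3 */
    return 0;
}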
@@ -571,10 +571,6 @@ common_switch:
     or eax, 8
     mov cr0, rax
 
-    ; set rsp0 in the task state segment
-    extern set_kernel_stack
-    call set_kernel_stack
-
     ; call cleanup code
     call finish_task_switch
 
@@ -35,7 +35,7 @@
 #include <asm/tss.h>
 #include <asm/page.h>
 
-gdt_ptr_t gp;
+gdt_ptr_t gp;
 static tss_t task_state_segment __attribute__ ((aligned (PAGE_SIZE)));
 // currently, our kernel has full access to the ioports
 static gdt_entry_t gdt[GDT_ENTRIES] = {[0 ... GDT_ENTRIES-1] = {0, 0, 0, 0, 0, 0}};
@@ -48,18 +48,11 @@ extern void gdt_flush(void);
 
 extern const void boot_stack;
 
-void set_kernel_stack(void)
-{
-    task_t* curr_task = current_task;
-
-    task_state_segment.rsp0 = (size_t) curr_task->stack + KERNEL_STACK_SIZE - 16; // => stack is 16byte aligned
-}
-
 size_t get_kernel_stack(void)
 {
-    task_t* curr_task = current_task;
+    task_t* curr_task = per_core(current_task);
 
-    return (size_t) curr_task->stack + KERNEL_STACK_SIZE - 16;
+    return (size_t) curr_task->stack + KERNEL_STACK_SIZE - 16; // => stack is 16byte aligned
 }
 
 /* Setup a descriptor in the Global Descriptor Table */
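The trailing "- 16" keeps the value returned by get_kernel_stack() 16-byte aligned, as the new comment notes: the stack base is at least 16-byte aligned and KERNEL_STACK_SIZE is a multiple of 16. A small userspace sketch of the same arithmetic (the stack size used here is only an assumption for the demo):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define KERNEL_STACK_SIZE 8192   /* assumed value, only for this demo */

int main(void)
{
    /* 16-byte aligned base, as a kernel stack allocation would be */
    char* stack = aligned_alloc(16, KERNEL_STACK_SIZE);

    /* same expression as get_kernel_stack(): top of the stack minus 16 bytes */
    uintptr_t top = (uintptr_t) stack + KERNEL_STACK_SIZE - 16;

    printf("stack top %p is %s16-byte aligned\n",
           (void*) top, (top % 16 == 0) ? "" : "NOT ");
    free(stack);
    return 0;
}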
@@ -254,7 +254,7 @@ size_t** irq_handler(struct state *s)
     // timer interrupt?
     if ((s->int_no == 32) || (s->int_no == 123))
         ret = scheduler(); // switch to a new task
-    else if ((s->int_no >= 32) && (get_highest_priority() > current_task->prio))
+    else if ((s->int_no >= 32) && (get_highest_priority() > per_core(current_task)->prio))
         ret = scheduler();
     else kprintf("Receive IRQ %d\n", s->int_no);
 
@@ -174,7 +174,7 @@ void isrs_install(void)
 
 static void fpu_handler(struct state *s)
 {
-    task_t* task = current_task;
+    task_t* task = per_core(current_task);
 
     asm volatile ("clts"); // clear the TS flag of cr0
     if (!(task->flags & TASK_FPU_INIT)) {
@@ -39,7 +39,7 @@
 
 size_t* get_current_stack(void)
 {
-    task_t* curr_task = current_task;
+    task_t* curr_task = per_core(current_task);
 
     // use new page table
     write_cr3(curr_task->page_map);
@@ -65,7 +65,7 @@ int create_default_frame(task_t* task, entry_point_t ep, void* arg)
      * and not for HW-task-switching is setting up a stack and not a TSS.
      * This is the stack which will be activated and popped off for iret later.
      */
-    stack = (size_t*) (task->stack + KERNEL_STACK_SIZE - 16); // => stack is 16byte aligned
+    stack = (size_t*) (task->stack + KERNEL_STACK_SIZE - 16); // => stack is 16byte aligned
 
     /* Only marker for debugging purposes, ... */
     *stack-- = 0xDEADBEEF;
@@ -109,6 +109,7 @@ int page_map(size_t viraddr, size_t phyaddr, size_t npages, size_t bits)
     int lvl, ret = -ENOMEM;
     long vpn = viraddr >> PAGE_BITS;
     long first[PAGE_LEVELS], last[PAGE_LEVELS];
+    task_t* curr_task;
 
     /* Calculate index boundaries for page map traversal */
     for (lvl=0; lvl<PAGE_LEVELS; lvl++) {
@@ -116,9 +117,11 @@ int page_map(size_t viraddr, size_t phyaddr, size_t npages, size_t bits)
         last[lvl] = (vpn+npages-1) >> (lvl * PAGE_MAP_BITS);
     }
 
+    curr_task = per_core(current_task);
+
     /** @todo: might not be sufficient! */
     if (bits & PG_USER)
-        spinlock_irqsave_lock(&current_task->page_lock);
+        spinlock_irqsave_lock(&curr_task->page_lock);
     else
         spinlock_lock(&kslock);
 
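page_map() and the other page-table routines now read per_core(current_task) once into a local curr_task and reuse it; with MAX_CORES > 1 every per_core() lookup briefly disables and re-enables interrupts (see the macro later in this diff), so caching the pointer is the cheaper pattern. A hypothetical userspace sketch of that effect (names and the lookup counter are purely illustrative):

#include <stdio.h>

/* stand-ins for the kernel's per-core machinery; illustrative only */
static int lookups = 0;
static int current_task_storage = 42;

static int* __get_percore_current_task(void)
{
    lookups++;               /* the real macro also toggles IRQs around this */
    return &current_task_storage;
}
#define per_core(name) (*__get_percore_##name())

int main(void)
{
    /* cache the lookup once, as page_map() and friends now do */
    int* curr_task = &per_core(current_task);

    for (int i = 0; i < 4; i++)
        (void) *curr_task;

    printf("per-core lookups performed: %d\n", lookups);   /* prints 1 */
    return 0;
}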
@@ -135,7 +138,7 @@ int page_map(size_t viraddr, size_t phyaddr, size_t npages, size_t bits)
             goto out;
 
         if (bits & PG_USER)
-            atomic_int32_inc(&current_task->user_usage);
+            atomic_int32_inc(&curr_task->user_usage);
 
         /* Reference the new table within its parent */
 #if 0

@@ -163,7 +166,7 @@ int page_map(size_t viraddr, size_t phyaddr, size_t npages, size_t bits)
     ret = 0;
 out:
     if (bits & PG_USER)
-        spinlock_irqsave_unlock(&current_task->page_lock);
+        spinlock_irqsave_unlock(&curr_task->page_lock);
     else
         spinlock_unlock(&kslock);
 
@@ -173,9 +176,11 @@ out:
 /** Tables are freed by page_map_drop() */
 int page_unmap(size_t viraddr, size_t npages)
 {
+    task_t* curr_task = per_core(current_task);
+
     /* We aquire both locks for kernel and task tables
      * as we dont know to which the region belongs. */
-    spinlock_irqsave_lock(&current_task->page_lock);
+    spinlock_irqsave_lock(&curr_task->page_lock);
     spinlock_lock(&kslock);
 
     /* Start iterating through the entries.

@@ -184,7 +189,7 @@ int page_unmap(size_t viraddr, size_t npages)
     for (vpn=start; vpn<start+npages; vpn++)
         self[0][vpn] = 0;
 
-    spinlock_irqsave_unlock(&current_task->page_lock);
+    spinlock_irqsave_unlock(&curr_task->page_lock);
     spinlock_unlock(&kslock);
 
     /* This can't fail because we don't make checks here */

@@ -193,6 +198,8 @@ int page_unmap(size_t viraddr, size_t npages)
 
 int page_map_drop(void)
 {
+    task_t* curr_task = per_core(current_task);
+
     void traverse(int lvl, long vpn) {
         long stop;
         for (stop=vpn+PAGE_MAP_ENTRIES; vpn<stop; vpn++) {
@@ -202,16 +209,16 @@ int page_map_drop(void)
                 traverse(lvl-1, vpn<<PAGE_MAP_BITS);
 
                 put_pages(self[lvl][vpn] & PAGE_MASK, 1);
-                atomic_int32_dec(&current_task->user_usage);
+                atomic_int32_dec(&curr_task->user_usage);
             }
         }
     }
 
-    spinlock_irqsave_lock(&current_task->page_lock);
+    spinlock_irqsave_lock(&curr_task->page_lock);
 
     traverse(PAGE_LEVELS-1, 0);
 
-    spinlock_irqsave_unlock(&current_task->page_lock);
+    spinlock_irqsave_unlock(&curr_task->page_lock);
 
     /* This can't fail because we don't make checks here */
     return 0;
@@ -219,6 +226,8 @@ int page_map_drop(void)
 
 int page_map_copy(task_t *dest)
 {
+    task_t* curr_task = per_core(current_task);
+
     int traverse(int lvl, long vpn) {
         long stop;
         for (stop=vpn+PAGE_MAP_ENTRIES; vpn<stop; vpn++) {

@@ -249,14 +258,14 @@ int page_map_copy(task_t *dest)
         return 0;
     }
 
-    spinlock_irqsave_lock(&current_task->page_lock);
+    spinlock_irqsave_lock(&curr_task->page_lock);
     self[PAGE_LEVELS-1][PAGE_MAP_ENTRIES-2] = dest->page_map | PG_PRESENT | PG_SELF | PG_RW;
 
     int ret = traverse(PAGE_LEVELS-1, 0);
 
     other[PAGE_LEVELS-1][PAGE_MAP_ENTRIES-1] = dest->page_map | PG_PRESENT | PG_SELF | PG_RW;
     self [PAGE_LEVELS-1][PAGE_MAP_ENTRIES-2] = 0;
-    spinlock_irqsave_unlock(&current_task->page_lock);
+    spinlock_irqsave_unlock(&curr_task->page_lock);
 
     /* Flush TLB entries of 'other' self-reference */
     flush_tlb();
@@ -267,7 +276,7 @@ int page_map_copy(task_t *dest)
 void page_fault_handler(struct state *s)
 {
     size_t viraddr = read_cr2();
-    task_t* task = current_task;
+    task_t* task = per_core(current_task);
 
     // on demand userspace heap mapping
     if ((task->heap) && (viraddr >= task->heap->start) && (viraddr < task->heap->end)) {
@@ -293,23 +302,13 @@ void page_fault_handler(struct state *s)
     }
 
 default_handler:
-#if 0
     kprintf("Page Fault Exception (%d) at cs:ip = %#x:%#lx, task = %u, addr = %#lx, error = %#x [ %s %s %s %s %s ]\n",
-        s->int_no, s->cs, s->eip, current_task->id, viraddr, s->error,
+        s->int_no, s->cs, s->rip, task->id, viraddr, s->error,
         (s->error & 0x4) ? "user" : "supervisor",
         (s->error & 0x10) ? "instruction" : "data",
         (s->error & 0x2) ? "write" : ((s->error & 0x10) ? "fetch" : "read"),
         (s->error & 0x1) ? "protection" : "not present",
         (s->error & 0x8) ? "reserved bit" : "\b");
-#else
-    kprintf("Page Fault Exception (%d) at cs:ip = %#x:%#lx, task = %u, addr = %#lx, error = %#x [ %s %s %s %s %s ]\n",
-        s->int_no, s->cs, s->rip, current_task->id, viraddr, s->error,
-        (s->error & 0x4) ? "user" : "supervisor",
-        (s->error & 0x10) ? "instruction" : "data",
-        (s->error & 0x2) ? "write" : ((s->error & 0x10) ? "fetch" : "read"),
-        (s->error & 0x1) ? "protection" : "not present",
-        (s->error & 0x8) ? "reserved bit" : "\b");
-#endif
 
     while(1) HALT;
 }
@@ -89,21 +89,27 @@ inline static int spinlock_destroy(spinlock_t* s) {
  */
 inline static int spinlock_lock(spinlock_t* s) {
     int32_t ticket;
+    task_t* curr_task;
 
     if (BUILTIN_EXPECT(!s, 0))
         return -EINVAL;
 
-    if (s->owner == current_task->id) {
+    curr_task = per_core(current_task);
+    if (s->owner == curr_task->id) {
         s->counter++;
         return 0;
     }
 
-    ticket = atomic_int32_add(&s->queue, 1);
+#if 1
+    ticket = atomic_int32_inc(&s->queue);
     while(atomic_int32_read(&s->dequeue) != ticket) {
-        PAUSE;
+        NOP1;
     }
-    s->owner = current_task->id;
+    s->owner = curr_task->id;
     s->counter = 1;
+#else
+    while( atomic_int32_test_and_set(&s->dequeue,0) );
+#endif
 
     return 0;
 }
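The queue/dequeue pair above is a ticket lock: a waiter takes a ticket from queue and spins until dequeue reaches it, and unlock advances dequeue. A minimal sketch with plain C11 atomics rather than the kernel's atomic_int32 helpers; atomic_int32_inc() is assumed to return the incremented value, which is why dequeue starts at 1:

#include <stdatomic.h>
#include <stdio.h>

typedef struct {
    atomic_int queue;    /* next ticket to hand out */
    atomic_int dequeue;  /* ticket currently allowed to enter */
} ticket_lock_t;

#define TICKET_LOCK_INIT { 0, 1 }

static void ticket_lock(ticket_lock_t* l)
{
    int ticket = atomic_fetch_add(&l->queue, 1) + 1;   /* "inc and return new value" */
    while (atomic_load(&l->dequeue) != ticket)
        ;                                              /* the kernel spins with NOP1/PAUSE here */
}

static void ticket_unlock(ticket_lock_t* l)
{
    atomic_fetch_add(&l->dequeue, 1);
}

int main(void)
{
    ticket_lock_t l = TICKET_LOCK_INIT;
    ticket_lock(&l);
    puts("in critical section");
    ticket_unlock(&l);
    return 0;
}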
@@ -120,7 +126,11 @@ inline static int spinlock_unlock(spinlock_t* s) {
     s->counter--;
     if (!s->counter) {
         s->owner = MAX_TASKS;
+#if 1
         atomic_int32_inc(&s->dequeue);
+#else
+        atomic_int32_set(&s->dequeue,1);
+#endif
     }
 
     return 0;
@@ -141,6 +151,7 @@ inline static int spinlock_irqsave_init(spinlock_irqsave_t* s) {
     atomic_int32_set(&s->queue, 0);
     atomic_int32_set(&s->dequeue, 1);
     s->flags = 0;
+    s->coreid = (uint32_t)-1;
     s->counter = 0;
 
     return 0;

@@ -156,6 +167,7 @@ inline static int spinlock_irqsave_destroy(spinlock_irqsave_t* s) {
         return -EINVAL;
 
     s->flags = 0;
+    s->coreid = (uint32_t)-1;
     s->counter = 0;
 
     return 0;
@@ -167,23 +179,28 @@ inline static int spinlock_irqsave_destroy(spinlock_irqsave_t* s) {
  * - -EINVAL (-22) on failure
  */
 inline static int spinlock_irqsave_lock(spinlock_irqsave_t* s) {
-    uint32_t flags;
     int32_t ticket;
+    uint8_t flags;
 
     if (BUILTIN_EXPECT(!s, 0))
         return -EINVAL;
 
     flags = irq_nested_disable();
-    if (s->counter == 1) {
+    if (s->coreid == CORE_ID) {
         s->counter++;
         return 0;
     }
 
-    ticket = atomic_int32_add(&s->queue, 1);
+#if 1
+    ticket = atomic_int32_inc(&s->queue);
     while (atomic_int32_read(&s->dequeue) != ticket) {
-        PAUSE;
+        NOP1;
     }
+#else
+    while( atomic_int32_test_and_set(&s->dequeue,0) );
+#endif
 
+    s->coreid = CORE_ID;
     s->flags = flags;
     s->counter = 1;
 
@@ -196,7 +213,7 @@ inline static int spinlock_irqsave_lock(spinlock_irqsave_t* s) {
  * - -EINVAL (-22) on failure
  */
 inline static int spinlock_irqsave_unlock(spinlock_irqsave_t* s) {
-    uint8_t flags;
+    uint32_t flags;
 
     if (BUILTIN_EXPECT(!s, 0))
         return -EINVAL;
@@ -204,8 +221,13 @@ inline static int spinlock_irqsave_unlock(spinlock_irqsave_t* s) {
     s->counter--;
     if (!s->counter) {
         flags = s->flags;
+        s->coreid = (uint32_t) -1;
         s->flags = 0;
-        atomic_int32_inc(&s->dequeue);
+#if 1
+        atomic_int32_inc(&s->dequeue);
+#else
+        atomic_int32_set(&s->dequeue,1);
+#endif
         irq_nested_enable(flags);
     }
 
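The new coreid/counter pair lets the core that already holds an irqsave lock re-enter it by bumping the counter, and only the last unlock clears the owner, releases the ticket, and restores the saved interrupt flags. A toy model of just this bookkeeping (the ticket queue and the actual interrupt masking are left out):

#include <stdint.h>
#include <stdio.h>

typedef struct {
    uint32_t coreid;    /* (uint32_t)-1 when unowned */
    uint32_t counter;
} irqsave_lock_t;

#define CORE_ID 0u       /* stand-in for the real per-core id */

static void lock(irqsave_lock_t* s)
{
    if (s->coreid == CORE_ID) { s->counter++; return; }   /* already held by this core */
    /* ...ticket acquisition elided... */
    s->coreid = CORE_ID;
    s->counter = 1;
}

static void unlock(irqsave_lock_t* s)
{
    if (--s->counter == 0)
        s->coreid = (uint32_t)-1;   /* the last unlock really releases and would restore flags */
}

int main(void)
{
    irqsave_lock_t s = { (uint32_t)-1, 0 };
    lock(&s); lock(&s);                        /* nested acquisition on the same core */
    printf("counter = %u\n", s.counter);       /* prints 2 */
    unlock(&s); unlock(&s);
    printf("owned: %s\n", s.coreid == (uint32_t)-1 ? "no" : "yes");
    return 0;
}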
@@ -41,7 +41,6 @@
 extern "C" {
 #endif
 
-
 /** @brief Spinlock structure */
 typedef struct spinlock {
     /// Internal queue

@@ -59,6 +58,8 @@ typedef struct spinlock_irqsave {
     atomic_int32_t queue;
     /// Internal dequeue
     atomic_int32_t dequeue;
+    /// Core Id of the lock owner
+    uint32_t coreid;
     /// Internal counter var
     uint32_t counter;
     /// Interrupt flag

@@ -68,7 +69,7 @@ typedef struct spinlock_irqsave {
 /// Macro for spinlock initialization
 #define SPINLOCK_INIT { ATOMIC_INIT(0), ATOMIC_INIT(1), MAX_TASKS, 0}
 /// Macro for irqsave spinlock initialization
-#define SPINLOCK_IRQSAVE_INIT { ATOMIC_INIT(0), ATOMIC_INIT(1), 0, 0}
+#define SPINLOCK_IRQSAVE_INIT { ATOMIC_INIT(0), ATOMIC_INIT(1), (uint32_t)-1, 0, 0}
 
 #ifdef __cplusplus
 }
@@ -36,6 +36,7 @@
 
 #include <hermit/config.h>
 #include <asm/stddef.h>
+#include <asm/irqflags.h>
 
 #ifdef __cplusplus
 extern "C" {

@@ -46,9 +47,42 @@ extern "C" {
 /// represents a task identifier
 typedef unsigned int tid_t;
 
+#if MAX_CORES == 1
+#define per_core(name) name
+#define DECLARE_PER_CORE(type, name) extern type name;
+#define DEFINE_PER_CORE(type, name, def_value) type name = def_value;
+#define DEFINE_PER_CORE_STATIC(type, name, def_value) static type name = def_value;
+#define CORE_ID 0
+#else
+#define per_core(name) (*__get_percore_##name())
+#define DECLARE_PER_CORE(type, name) \
+    typedef struct { type var __attribute__ ((aligned (CACHE_LINE))); } aligned_##name;\
+    extern aligned_##name name[MAX_CORES];\
+    inline static type* __get_percore_##name(void) {\
+        type* ret; \
+        uint8_t flags = irq_nested_disable(); \
+        ret = &(name[smp_id()].var); \
+        irq_nested_enable(flags);\
+        return ret; \
+    }
+#define DEFINE_PER_CORE(type, name, def_value) \
+    aligned_##name name[MAX_CORES] = {[0 ... MAX_CORES-1] = {def_value}};
+#define DEFINE_PER_CORE_STATIC(type, name, def_value) \
+    typedef struct { type var __attribute__ ((aligned (CACHE_LINE))); } aligned_##name;\
+    static aligned_##name name[MAX_CORES] = {[0 ... MAX_CORES-1] = {def_value}}; \
+    inline static type* __get_percore_##name(void) {\
+        type* ret; \
+        uint8_t flags = irq_nested_disable(); \
+        ret = &(name[smp_id()].var); \
+        irq_nested_enable(flags);\
+        return ret; \
+    }
+#define CORE_ID smp_id()
+#endif
+
 /* needed to find the task, which is currently running on this core */
 struct task;
 /// pointer to the current (running) task
-extern struct task* current_task;
+DECLARE_PER_CORE(struct task*, current_task);
 
 #ifdef __cplusplus
 }
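With MAX_CORES > 1, each per-core variable becomes a cache-line-aligned array indexed by smp_id(), and per_core(name) dereferences the slot of the calling core (the kernel briefly disables interrupts around the lookup so the task cannot migrate mid-access). A self-contained userspace sketch of the same shape, with a stubbed smp_id() and the IRQ masking dropped:

#include <stdio.h>

#define MAX_CORES 4
#define CACHE_LINE 64

/* stand-in for smp_id(); in the kernel this is the APIC ID of the running core */
static unsigned int smp_id(void) { return 0; }

/* same shape as DEFINE_PER_CORE_STATIC in the hunk above */
#define DEFINE_PER_CORE_STATIC(type, name, def_value) \
    typedef struct { type var __attribute__ ((aligned (CACHE_LINE))); } aligned_##name; \
    static aligned_##name name[MAX_CORES] = {[0 ... MAX_CORES-1] = {def_value}}; \
    static type* __get_percore_##name(void) { return &(name[smp_id()].var); }
#define per_core(name) (*__get_percore_##name())

DEFINE_PER_CORE_STATIC(int, counter, 0)

int main(void)
{
    per_core(counter)++;                  /* touches only this core's cache line */
    printf("counter on core %u: %d\n", smp_id(), per_core(counter));
    return 0;
}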
@@ -107,7 +107,7 @@ int create_user_task(tid_t* id, const char* fame, char** argv);
  * - 0 on success
  * - -ENOMEM (-12) or -EINVAL (-22) on failure
  */
-int create_task(tid_t* id, entry_point_t ep, void* arg, uint8_t prio);
+int create_task(tid_t* id, entry_point_t ep, void* arg, uint8_t prio, uint32_t core_id);
 
 /** @brief Cleanup function for the task termination
  *

@@ -73,6 +73,8 @@ typedef struct task {
     tid_t id __attribute__ ((aligned (CACHE_LINE)));
     /// Task status (INVALID, READY, RUNNING, ...)
     uint32_t status;
+    /// last core id on which the task was running
+    uint32_t last_core;
     /// copy of the stack pointer before a context switch
     size_t* last_stack_pointer;
     /// start address of the stack
@@ -55,6 +55,13 @@ extern atomic_int32_t total_pages;
 extern atomic_int32_t total_allocated_pages;
 extern atomic_int32_t total_available_pages;
 
+static int foo(void* arg)
+{
+    kprintf("hello from %s\n", (char*) arg);
+
+    return 0;
+}
+
 static int hermit_init(void)
 {
     // initialize .bss section

@@ -82,6 +89,8 @@ int main(void)
     kprintf("Current allocated memory: %lu KiB\n", atomic_int32_read(&total_allocated_pages) * PAGE_SIZE / 1024);
     kprintf("Current available memory: %lu KiB\n", atomic_int32_read(&total_available_pages) * PAGE_SIZE / 1024);
 
+    create_kernel_task(NULL, foo, "foo", NORMAL_PRIO);
+
     while(1) {
         HALT;
     }
@@ -46,7 +46,7 @@ static int sys_write(int fd, const char* buf, size_t len)
 
 static ssize_t sys_sbrk(int incr)
 {
-    task_t* task = current_task;
+    task_t* task = per_core(current_task);
     vma_t* heap = task->heap;
     ssize_t ret;
 
@@ -41,14 +41,19 @@
  * A task's id will be its position in this array.
  */
 static task_t task_table[MAX_TASKS] = { \
-        [0] = {0, TASK_IDLE, NULL, NULL, TASK_DEFAULT_FLAGS, 0, 0, SPINLOCK_IRQSAVE_INIT, SPINLOCK_INIT, NULL, NULL, ATOMIC_INIT(0), NULL, NULL}, \
-        [1 ... MAX_TASKS-1] = {0, TASK_INVALID, NULL, NULL, TASK_DEFAULT_FLAGS, 0, 0, SPINLOCK_IRQSAVE_INIT, SPINLOCK_INIT, NULL, NULL,ATOMIC_INIT(0), NULL, NULL}};
+        [0] = {0, TASK_IDLE, 0, NULL, NULL, TASK_DEFAULT_FLAGS, 0, 0, SPINLOCK_IRQSAVE_INIT, SPINLOCK_INIT, NULL, NULL, ATOMIC_INIT(0), NULL, NULL}, \
+        [1 ... MAX_TASKS-1] = {0, TASK_INVALID, 0, NULL, NULL, TASK_DEFAULT_FLAGS, 0, 0, SPINLOCK_IRQSAVE_INIT, SPINLOCK_INIT, NULL, NULL,ATOMIC_INIT(0), NULL, NULL}};
 
 static spinlock_irqsave_t table_lock = SPINLOCK_IRQSAVE_INIT;
 
-static readyqueues_t readyqueues = {task_table+0, NULL, 0, 0, {[0 ... MAX_PRIO-2] = {NULL, NULL}}, SPINLOCK_IRQSAVE_INIT};
+#if MAX_CORES > 1
+static readyqueues_t readyqueues[MAX_CORES] = { \
+        [0 ... MAX_CORES-1] = {NULL, NULL, 0, 0, {[0 ... MAX_PRIO-2] = {NULL, NULL}}, SPINLOCK_IRQSAVE_INIT}};
+#else
+static readyqueues_t readyqueues[1] = {[0] = {task_table+0, NULL, 0, 0, {[0 ... MAX_PRIO-2] = {NULL, NULL}}, SPINLOCK_IRQSAVE_INIT}};
+#endif
 
-task_t* current_task = task_table+0;
+DEFINE_PER_CORE(task_t*, current_task, task_table+0);
 extern const void boot_stack;
 
 /** @brief helper function for the assembly code to determine the current task
@@ -56,12 +61,12 @@ extern const void boot_stack;
  */
 task_t* get_current_task(void)
 {
-    return current_task;
+    return per_core(current_task);
 }
 
 uint32_t get_highest_priority(void)
 {
-    return msb(readyqueues.prio_bitmap);
+    return msb(readyqueues[CORE_ID].prio_bitmap);
 }
 
 int multitasking_init(void)
@@ -72,9 +77,11 @@ int multitasking_init(void)
     }
 
     task_table[0].prio = IDLE_PRIO;
-    task_table[0].stack = (void*) &boot_stack;
+    task_table[0].stack = (char*) &boot_stack;
     task_table[0].page_map = read_cr3();
 
+    readyqueues[CORE_ID].idle = task_table+0;
+
     // register idle task
     register_task();
 
@@ -85,50 +92,53 @@ void finish_task_switch(void)
 {
     task_t* old;
     uint8_t prio;
+    const uint32_t core_id = CORE_ID;
+    task_t* curr_task = per_core(current_task);
 
-    spinlock_irqsave_lock(&readyqueues.lock);
+    spinlock_irqsave_lock(&readyqueues[core_id].lock);
 
-    if ((old = readyqueues.old_task) != NULL) {
+    if ((old = readyqueues[core_id].old_task) != NULL) {
         if (old->status == TASK_INVALID) {
             old->stack = NULL;
             old->last_stack_pointer = NULL;
-            readyqueues.old_task = NULL;
+            readyqueues[core_id].old_task = NULL;
         } else {
             prio = old->prio;
-            if (!readyqueues.queue[prio-1].first) {
+            if (!readyqueues[core_id].queue[prio-1].first) {
                 old->next = old->prev = NULL;
-                readyqueues.queue[prio-1].first = readyqueues.queue[prio-1].last = old;
+                readyqueues[core_id].queue[prio-1].first = readyqueues[core_id].queue[prio-1].last = old;
             } else {
                 old->next = NULL;
-                old->prev = readyqueues.queue[prio-1].last;
-                readyqueues.queue[prio-1].last->next = old;
-                readyqueues.queue[prio-1].last = old;
+                old->prev = readyqueues[core_id].queue[prio-1].last;
+                readyqueues[core_id].queue[prio-1].last->next = old;
+                readyqueues[core_id].queue[prio-1].last = old;
             }
-            readyqueues.old_task = NULL;
-            readyqueues.prio_bitmap |= (1 << prio);
+            readyqueues[core_id].old_task = NULL;
+            readyqueues[core_id].prio_bitmap |= (1 << prio);
         }
     }
 
-    spinlock_irqsave_unlock(&readyqueues.lock);
+    spinlock_irqsave_unlock(&readyqueues[core_id].lock);
 
-    if (current_task->heap)
-        kfree(current_task->heap);
+    if (curr_task->heap)
+        kfree(curr_task->heap);
 }
 
 /** @brief A procedure to be called by
  * procedures which are called by exiting tasks. */
 static void NORETURN do_exit(int arg)
 {
-    task_t* curr_task = current_task;
+    task_t* curr_task = per_core(current_task);
+    const uint32_t core_id = CORE_ID;
 
     kprintf("Terminate task: %u, return value %d\n", curr_task->id, arg);
 
     page_map_drop();
 
     // decrease the number of active tasks
-    spinlock_irqsave_lock(&readyqueues.lock);
-    readyqueues.nr_tasks--;
-    spinlock_irqsave_unlock(&readyqueues.lock);
+    spinlock_irqsave_lock(&readyqueues[core_id].lock);
+    readyqueues[core_id].nr_tasks--;
+    spinlock_irqsave_unlock(&readyqueues[core_id].lock);
 
     curr_task->status = TASK_FINISHED;
     reschedule();
@@ -157,7 +167,7 @@ void NORETURN abort(void) {
     do_exit(-1);
 }
 
-int create_task(tid_t* id, entry_point_t ep, void* arg, uint8_t prio)
+int create_task(tid_t* id, entry_point_t ep, void* arg, uint8_t prio, uint32_t core_id)
 {
     int ret = -ENOMEM;
     uint32_t i;

@@ -175,6 +185,7 @@ int create_task(tid_t* id, entry_point_t ep, void* arg, uint8_t prio)
     if (task_table[i].status == TASK_INVALID) {
         task_table[i].id = i;
         task_table[i].status = TASK_READY;
+        task_table[i].last_core = 0;
         task_table[i].last_stack_pointer = NULL;
         task_table[i].stack = create_stack(i);
         task_table[i].prio = prio;
@@ -199,20 +210,20 @@ int create_task(tid_t* id, entry_point_t ep, void* arg, uint8_t prio)
         ret = create_default_frame(task_table+i, ep, arg);
 
         // add task in the readyqueues
-        spinlock_irqsave_lock(&readyqueues.lock);
-        readyqueues.prio_bitmap |= (1 << prio);
-        readyqueues.nr_tasks++;
-        if (!readyqueues.queue[prio-1].first) {
+        spinlock_irqsave_lock(&readyqueues[core_id].lock);
+        readyqueues[core_id].prio_bitmap |= (1 << prio);
+        readyqueues[core_id].nr_tasks++;
+        if (!readyqueues[core_id].queue[prio-1].first) {
             task_table[i].next = task_table[i].prev = NULL;
-            readyqueues.queue[prio-1].first = task_table+i;
-            readyqueues.queue[prio-1].last = task_table+i;
+            readyqueues[core_id].queue[prio-1].first = task_table+i;
+            readyqueues[core_id].queue[prio-1].last = task_table+i;
         } else {
-            task_table[i].prev = readyqueues.queue[prio-1].last;
+            task_table[i].prev = readyqueues[core_id].queue[prio-1].last;
             task_table[i].next = NULL;
-            readyqueues.queue[prio-1].last->next = task_table+i;
-            readyqueues.queue[prio-1].last = task_table+i;
+            readyqueues[core_id].queue[prio-1].last->next = task_table+i;
+            readyqueues[core_id].queue[prio-1].last = task_table+i;
         }
-        spinlock_irqsave_unlock(&readyqueues.lock);
+        spinlock_irqsave_unlock(&readyqueues[core_id].lock);
         break;
     }
 }
@@ -228,7 +239,7 @@ int create_kernel_task(tid_t* id, entry_point_t ep, void* args, uint8_t prio)
     if (prio > MAX_PRIO)
         prio = NORMAL_PRIO;
 
-    return create_task(id, ep, args, prio);
+    return create_task(id, ep, args, prio, CORE_ID);
 }
 
 /** @brief Wakeup a blocked task

@@ -240,7 +251,7 @@ int create_kernel_task(tid_t* id, entry_point_t ep, void* args, uint8_t prio)
 int wakeup_task(tid_t id)
 {
     task_t* task;
-    uint32_t prio;
+    uint32_t core_id, prio;
     int ret = -EINVAL;
     uint8_t flags;
 
@@ -248,27 +259,28 @@ int wakeup_task(tid_t id)
 
     task = task_table + id;
     prio = task->prio;
+    core_id = task->last_core;
 
     if (task->status == TASK_BLOCKED) {
         task->status = TASK_READY;
         ret = 0;
 
-        spinlock_irqsave_lock(&readyqueues.lock);
+        spinlock_irqsave_lock(&readyqueues[core_id].lock);
         // increase the number of ready tasks
-        readyqueues.nr_tasks++;
+        readyqueues[core_id].nr_tasks++;
 
         // add task to the runqueue
-        if (!readyqueues.queue[prio-1].last) {
-            readyqueues.queue[prio-1].last = readyqueues.queue[prio-1].first = task;
+        if (!readyqueues[core_id].queue[prio-1].last) {
+            readyqueues[core_id].queue[prio-1].last = readyqueues[core_id].queue[prio-1].first = task;
             task->next = task->prev = NULL;
-            readyqueues.prio_bitmap |= (1 << prio);
+            readyqueues[core_id].prio_bitmap |= (1 << prio);
         } else {
-            task->prev = readyqueues.queue[prio-1].last;
+            task->prev = readyqueues[core_id].queue[prio-1].last;
             task->next = NULL;
-            readyqueues.queue[prio-1].last->next = task;
-            readyqueues.queue[prio-1].last = task;
+            readyqueues[core_id].queue[prio-1].last->next = task;
+            readyqueues[core_id].queue[prio-1].last = task;
         }
-        spinlock_irqsave_unlock(&readyqueues.lock);
+        spinlock_irqsave_unlock(&readyqueues[core_id].lock);
     }
 
     irq_nested_enable(flags);
@@ -286,41 +298,44 @@ int wakeup_task(tid_t id)
  */
 int block_current_task(void)
 {
+    task_t* curr_task;
     tid_t id;
-    uint32_t prio;
+    uint32_t prio, core_id;
     int ret = -EINVAL;
     uint8_t flags;
 
     flags = irq_nested_disable();
 
-    id = current_task->id;
-    prio = current_task->prio;
+    curr_task = per_core(current_task);
+    id = curr_task->id;
+    prio = curr_task->prio;
+    core_id = CORE_ID;
 
     if (task_table[id].status == TASK_RUNNING) {
         task_table[id].status = TASK_BLOCKED;
         ret = 0;
 
-        spinlock_irqsave_lock(&readyqueues.lock);
+        spinlock_irqsave_lock(&readyqueues[core_id].lock);
         // reduce the number of ready tasks
-        readyqueues.nr_tasks--;
+        readyqueues[core_id].nr_tasks--;
 
         // remove task from queue
         if (task_table[id].prev)
             task_table[id].prev->next = task_table[id].next;
         if (task_table[id].next)
             task_table[id].next->prev = task_table[id].prev;
-        if (readyqueues.queue[prio-1].first == task_table+id)
-            readyqueues.queue[prio-1].first = task_table[id].next;
-        if (readyqueues.queue[prio-1].last == task_table+id) {
-            readyqueues.queue[prio-1].last = task_table[id].prev;
-            if (!readyqueues.queue[prio-1].last)
-                readyqueues.queue[prio-1].last = readyqueues.queue[prio-1].first;
+        if (readyqueues[core_id].queue[prio-1].first == task_table+id)
+            readyqueues[core_id].queue[prio-1].first = task_table[id].next;
+        if (readyqueues[core_id].queue[prio-1].last == task_table+id) {
+            readyqueues[core_id].queue[prio-1].last = task_table[id].prev;
+            if (!readyqueues[core_id].queue[prio-1].last)
+                readyqueues[core_id].queue[prio-1].last = readyqueues[core_id].queue[prio-1].first;
         }
 
         // No valid task in queue => update prio_bitmap
-        if (!readyqueues.queue[prio-1].first)
-            readyqueues.prio_bitmap &= ~(1 << prio);
-        spinlock_irqsave_unlock(&readyqueues.lock);
+        if (!readyqueues[core_id].queue[prio-1].first)
+            readyqueues[core_id].prio_bitmap &= ~(1 << prio);
+        spinlock_irqsave_unlock(&readyqueues[core_id].lock);
     }
 
     irq_nested_enable(flags);
@@ -331,60 +346,63 @@ int block_current_task(void)
 size_t** scheduler(void)
 {
     task_t* orig_task;
+    task_t* curr_task;
+    const int32_t core_id = CORE_ID;
     uint32_t prio;
 
-    orig_task = current_task;
+    orig_task = curr_task = per_core(current_task);
+    curr_task->last_core = core_id;
 
-    spinlock_irqsave_lock(&readyqueues.lock);
+    spinlock_irqsave_lock(&readyqueues[core_id].lock);
 
     /* signalizes that this task could be reused */
-    if (current_task->status == TASK_FINISHED) {
-        current_task->status = TASK_INVALID;
-        readyqueues.old_task = current_task;
-    } else readyqueues.old_task = NULL; // reset old task
+    if (curr_task->status == TASK_FINISHED) {
+        curr_task->status = TASK_INVALID;
+        readyqueues[core_id].old_task = curr_task;
+    } else readyqueues[core_id].old_task = NULL; // reset old task
 
-    prio = msb(readyqueues.prio_bitmap); // determines highest priority
+    prio = msb(readyqueues[core_id].prio_bitmap); // determines highest priority
     if (prio > MAX_PRIO) {
-        if ((current_task->status == TASK_RUNNING) || (current_task->status == TASK_IDLE))
+        if ((curr_task->status == TASK_RUNNING) || (curr_task->status == TASK_IDLE))
             goto get_task_out;
-        current_task = readyqueues.idle;
+        curr_task = per_core(current_task) = readyqueues[core_id].idle;
     } else {
         // Does the current task have an higher priority? => no task switch
-        if ((current_task->prio > prio) && (current_task->status == TASK_RUNNING))
+        if ((curr_task->prio > prio) && (curr_task->status == TASK_RUNNING))
             goto get_task_out;
 
-        if (current_task->status == TASK_RUNNING) {
-            current_task->status = TASK_READY;
-            readyqueues.old_task = current_task;
+        if (curr_task->status == TASK_RUNNING) {
+            curr_task->status = TASK_READY;
+            readyqueues[core_id].old_task = curr_task;
         }
 
-        current_task = readyqueues.queue[prio-1].first;
-        if (BUILTIN_EXPECT(current_task->status == TASK_INVALID, 0)) {
-            kprintf("Upps!!!!!!! Got invalid task %d, orig task %d\n", current_task->id, orig_task->id);
+        curr_task = per_core(current_task) = readyqueues[core_id].queue[prio-1].first;
+        if (BUILTIN_EXPECT(curr_task->status == TASK_INVALID, 0)) {
+            kprintf("Upps!!!!!!! Got invalid task %d, orig task %d\n", curr_task->id, orig_task->id);
         }
-        current_task->status = TASK_RUNNING;
+        curr_task->status = TASK_RUNNING;
 
         // remove new task from queue
        // by the way, priority 0 is only used by the idle task and doesn't need own queue
-        readyqueues.queue[prio-1].first = current_task->next;
-        if (!current_task->next) {
-            readyqueues.queue[prio-1].last = NULL;
-            readyqueues.prio_bitmap &= ~(1 << prio);
+        readyqueues[core_id].queue[prio-1].first = curr_task->next;
+        if (!curr_task->next) {
+            readyqueues[core_id].queue[prio-1].last = NULL;
+            readyqueues[core_id].prio_bitmap &= ~(1 << prio);
         }
-        current_task->next = current_task->prev = NULL;
+        curr_task->next = curr_task->prev = NULL;
     }
 
 get_task_out:
-    spinlock_irqsave_unlock(&readyqueues.lock);
+    spinlock_irqsave_unlock(&readyqueues[core_id].lock);
 
-    if (current_task != orig_task) {
+    if (curr_task != orig_task) {
         /* if the original task is using the FPU, we need to save the FPU context */
         if ((orig_task->flags & TASK_FPU_USED) && (orig_task->status == TASK_READY)) {
             save_fpu_state(&(orig_task->fpu));
             orig_task->flags &= ~TASK_FPU_USED;
         }
 
-        kprintf("schedule from %u to %u with prio %u\n", orig_task->id, current_task->id, (uint32_t)current_task->prio);
+        kprintf("schedule on core %d from %u to %u with prio %u\n", core_id, orig_task->id, curr_task->id, (uint32_t)curr_task->prio);
 
         return (size_t**) &(orig_task->last_stack_pointer);
     }
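scheduler() and get_highest_priority() pick the next priority with msb() on the per-core prio_bitmap: a bit is set when a queue gains a ready task and cleared when the queue runs empty. A small sketch of that selection; the msb() helper here, including its behaviour for an empty bitmap, is an assumption and not the kernel's implementation:

#include <stdint.h>
#include <stdio.h>

#define MAX_PRIO 32

/* highest set bit; returns MAX_PRIO + 1 when the bitmap is empty,
 * mirroring the "prio > MAX_PRIO" check in scheduler() above */
static uint32_t msb(uint32_t bitmap)
{
    if (bitmap == 0)
        return MAX_PRIO + 1;
    return 31 - __builtin_clz(bitmap);
}

int main(void)
{
    uint32_t prio_bitmap = 0;

    prio_bitmap |= (1 << 8);    /* a NORMAL_PRIO-like task becomes ready   */
    prio_bitmap |= (1 << 3);    /* a lower-priority task becomes ready too */
    printf("next priority to run: %u\n", msb(prio_bitmap));   /* prints 8 */

    prio_bitmap &= ~(1 << 8);   /* the queue for priority 8 ran empty */
    printf("next priority to run: %u\n", msb(prio_bitmap));   /* prints 3 */
    return 0;
}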
@@ -105,7 +105,7 @@ out:
 
 size_t vma_alloc(size_t size, uint32_t flags)
 {
-    task_t* task = current_task;
+    task_t* task = per_core(current_task);
     spinlock_t* lock;
     vma_t** list;
 

@@ -178,7 +178,7 @@ found:
 
 int vma_free(size_t start, size_t end)
 {
-    task_t* task = current_task;
+    task_t* task = per_core(current_task);
     spinlock_t* lock;
     vma_t* vma;
     vma_t** list = NULL;

@@ -251,7 +251,7 @@ int vma_free(size_t start, size_t end)
 
 int vma_add(size_t start, size_t end, uint32_t flags)
 {
-    task_t* task = current_task;
+    task_t* task = per_core(current_task);
     spinlock_t* lock;
     vma_t** list;
 

@@ -386,7 +386,7 @@ void vma_dump(void)
         }
     }
 
-    task_t* task = current_task;
+    task_t* task = per_core(current_task);
 
     kputs("Kernelspace VMAs:\n");
     spinlock_lock(&vma_lock);