use static stacks only for the idle tasks
=> all other stacks will be created on demand
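
In short: per-task kernel stacks are no longer carved out of a static kstacks[MAX_TASKS][KERNEL_STACK_SIZE] array. Only the idle tasks keep static stacks (one KERNEL_STACK_SIZE slice per core in the new boot_stack section); every other task now gets its kernel stack from the allocator via create_stack() when it is created, and finish_task_switch() releases it with destroy_stack() once the task has finished. A minimal stand-alone sketch of that lifecycle, condensed from the hunks below — plain malloc/free stand in for the kernel's kmalloc/kfree, destroy_stack() is simplified to void, and the stack size is illustrative:

    #include <stdio.h>
    #include <stdlib.h>

    #define KERNEL_STACK_SIZE 8192      /* illustrative; the kernel defines its own value */

    typedef struct task {
        size_t* last_stack_pointer;     /* saved SP, written on a context switch */
        void*   stack;                  /* start address of the stack (new field) */
    } task_t;

    static void* create_stack(void) { return malloc(KERNEL_STACK_SIZE); }
    static void destroy_stack(task_t* t) { free(t->stack); }

    int main(void)
    {
        task_t t = { NULL, NULL };
        t.stack = create_stack();       /* create_task(): the stack now comes from the heap */
        printf("stack at %p\n", t.stack);
        destroy_stack(&t);              /* finish_task_switch(): reclaimed once the task is TASK_INVALID */
        t.stack = t.last_stack_pointer = NULL;
        return 0;
    }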
commit 4775e5e11e
parent ae1216f757
9 changed files with 100 additions and 57 deletions
@@ -110,12 +110,6 @@ static inline int jump_to_user_code(uint32_t ep, uint32_t stack)
 #endif
 }
 
-/** @brief determines the stack of a specific task
- *
- * @return start address of a specific task
- */
-size_t get_stack(uint32_t id);
-
 #ifdef __cplusplus
 }
 #endif
@@ -51,13 +51,12 @@ mboot:
 
 msg db "Hello from MetalSVM kernel!!", 0
 
-extern default_stack_pointer
-
 SECTION .text
 ALIGN 4
 stublet:
 ; initialize stack pointer.
-    mov esp, [default_stack_pointer]
+    mov esp, boot_stack
+    add esp, KERNEL_STACK_SIZE-16
 ; save pointer to the multiboot structure
     push ebx
 ; initialize cpu features
@@ -803,4 +802,10 @@ no_context_switch:
     add esp, 8
     iret
+
+SECTION .data
+global boot_stack
+ALIGN 4096
+boot_stack:
+TIMES (MAX_CORES*KERNEL_STACK_SIZE) DB 0xcd
 
 SECTION .note.GNU-stack noalloc noexec nowrite progbits
@@ -360,8 +360,8 @@ start64:
     mov gs, ax
     mov ss, ax
     ; set default stack pointer
-    extern default_stack_pointer
-    mov rsp, [default_stack_pointer]
+    mov rsp, boot_stack
+    add rsp, KERNEL_STACK_SIZE-16
     ; interpret multiboot information
     extern multiboot_init
     mov rdi, rbx
@@ -1164,4 +1164,10 @@ no_context_switch:
     add rsp, 16
     iretq
+
+SECTION .data
+global boot_stack
+ALIGN 4096
+boot_stack:
+TIMES (MAX_CORES*KERNEL_STACK_SIZE) DB 0xcd
 
 SECTION .note.GNU-stack noalloc noexec nowrite progbits
@@ -29,8 +29,6 @@
 
 gdt_ptr_t gp;
 static tss_t task_state_segments[MAX_CORES] __attribute__ ((aligned (PAGE_SIZE)));
-static unsigned char kstacks[MAX_TASKS][KERNEL_STACK_SIZE] __attribute__ ((aligned (PAGE_SIZE))) = {[0][0 ... KERNEL_STACK_SIZE-1] = 0xCD};
-size_t default_stack_pointer = (size_t) kstacks[0] + KERNEL_STACK_SIZE - 16; // => stack is 16byte aligned
 // currently, our kernel has full access to the ioports
 static gdt_entry_t gdt[GDT_ENTRIES] = {[0 ... GDT_ENTRIES-1] = {0, 0, 0, 0, 0, 0}};
 
@@ -46,34 +44,28 @@ size_t* get_current_stack(void)
 
     // determine and set esp0
 #ifdef CONFIG_X86_32
-    task_state_segments[CORE_ID].esp0 = (size_t) kstacks[curr_task->id] + KERNEL_STACK_SIZE - 16; // => stack is 16byte aligned
+    task_state_segments[CORE_ID].esp0 = (size_t) curr_task->stack + KERNEL_STACK_SIZE - 16; // => stack is 16byte aligned
 #else
-    task_state_segments[CORE_ID].rsp0 = (size_t) kstacks[curr_task->id] + KERNEL_STACK_SIZE - 16; // => stack is 16byte aligned
+    task_state_segments[CORE_ID].rsp0 = (size_t) curr_task->stack + KERNEL_STACK_SIZE - 16; // => stack is 16byte aligned
 #endif
 
     // use new page table
     write_cr3(virt_to_phys((size_t)curr_task->pgd));
 
-    return curr_task->stack;
-}
-
-size_t get_stack(uint32_t id)
-{
-    if (BUILTIN_EXPECT(id >= MAX_TASKS, 0))
-        return -EINVAL;
-    return (size_t) kstacks[id] + KERNEL_STACK_SIZE - 16; // => stack is 16byte aligned
+    return curr_task->last_stack_pointer;
 }
 
 int arch_fork(task_t* task)
 {
-    uint32_t id;
     struct state* state;
     task_t* curr_task = per_core(current_task);
     size_t esp, state_size;
 
     if (BUILTIN_EXPECT(!task, 0))
         return -EINVAL;
-    id = task->id;
 
+    if (BUILTIN_EXPECT(!task->stack, 0))
+        return -EINVAL;
+
 #ifdef CONFIG_X86_32
     state_size = sizeof(struct state) - 2*sizeof(size_t);
@@ -83,12 +75,12 @@ int arch_fork(task_t* task)
 
     // copy kernel stack of the current task
     mb();
-    memcpy(kstacks[id], kstacks[curr_task->id], KERNEL_STACK_SIZE);
+    memcpy(task->stack, curr_task->stack, KERNEL_STACK_SIZE);
 
 #ifdef CONFIG_X86_32
     asm volatile ("mov %%esp, %0" : "=m"(esp));
-    esp -= (size_t) kstacks[curr_task->id];
-    esp += (size_t) kstacks[id];
+    esp -= (size_t) curr_task->stack;
+    esp += (size_t) task->stack;
 
     state = (struct state*) (esp - state_size);
     //memset(state, 0x00, state_size);
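
A note on the esp arithmetic above: arch_fork() byte-copies the parent's kernel stack into the child's newly allocated one, so the saved stack pointer still refers to the parent's stack. Since both stacks are KERNEL_STACK_SIZE bytes and the copy preserves offsets, rebasing is just esp = esp - (size_t)curr_task->stack + (size_t)task->stack. With hypothetical addresses: if the parent's stack starts at 0xC0100000, the child's at 0xC0200000, and the saved esp is 0xC0101F40, the child's esp becomes 0xC0101F40 - 0xC0100000 + 0xC0200000 = 0xC0201F40 — the same offset 0x1F40 into the new stack. The old code used exactly the same trick, only with the kstacks[] bases.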
@@ -103,7 +95,7 @@ int arch_fork(task_t* task)
     asm volatile ("pop %0" : "=m"(state->eax));
 
     state->esp = esp;
-    task->stack = (size_t*) state;
+    task->last_stack_pointer = (size_t*) state;
     state->int_no = 0xB16B00B5;
     state->error = 0xC03DB4B3;
     state->cs = 0x08;
@@ -123,23 +115,23 @@ int arch_fork(task_t* task)
 
 int create_default_frame(task_t* task, entry_point_t ep, void* arg)
 {
-    uint16_t cs = 0x08;
-    uint32_t id;
     size_t *stack;
     struct state *stptr;
     size_t state_size;
 
     if (BUILTIN_EXPECT(!task, 0))
         return -EINVAL;
-    id = task->id;
 
-    memset(kstacks[id], 0xCD, KERNEL_STACK_SIZE);
+    if (BUILTIN_EXPECT(!task->stack, 0))
+        return -EINVAL;
+
+    memset(task->stack, 0xCD, KERNEL_STACK_SIZE);
 
     /* The difference between setting up a task for SW-task-switching
      * and not for HW-task-switching is setting up a stack and not a TSS.
      * This is the stack which will be activated and popped off for iret later.
     */
-    stack = (size_t*) (kstacks[id] + KERNEL_STACK_SIZE - 16); // => stack is 16byte aligned
+    stack = (size_t*) (task->stack + KERNEL_STACK_SIZE - 16); // => stack is 16byte aligned
 
     /* The next three things on the stack are a marker for debugging purposes, ... */
     *stack-- = 0xDEADBEEF;
@@ -183,7 +175,7 @@ int create_default_frame(task_t* task, entry_point_t ep, void* arg)
 #else
     stptr->rip = (size_t)ep;
 #endif
-    stptr->cs = cs;
+    stptr->cs = 0x08;
 #ifdef CONFIG_X86_32
     stptr->eflags = 0x1202;
     // the creation of a kernel tasks didn't change the IOPL level
@@ -195,7 +187,7 @@ int create_default_frame(task_t* task, entry_point_t ep, void* arg)
 #endif
 
     /* Set the task's stack pointer entry to the stack we have crafted right now. */
-    task->stack = (size_t*)stack;
+    task->last_stack_pointer = (size_t*)stack;
 
     return 0;
 }
@@ -115,7 +115,6 @@ static void timer_handler(struct state *s)
         vga_puts("One second has passed\n");
     }*/
 
-    kputs("A");
     /* Dump load every minute */
     //if (timer_ticks % (TIMER_FREQ*60) == 0)
     //    dump_load();
@@ -83,6 +83,19 @@ void* mem_allocation(size_t sz, uint32_t flags);
  */
 void kfree(void*, size_t);
 
+/** @brief Create a new stack for a new task
+ *
+ * @return start address of the new stack
+ */
+void* create_stack(void);
+
+/** @brief Delete stack of a finished task
+ *
+ * @param task Pointer to the task whose stack shall be deleted
+ * @return 0 on success
+ */
+int destroy_stack(task_t*);
+
 /** @brief String to long
  *
  * This one is documented in newlib library.
@@ -71,7 +71,9 @@ typedef struct task {
     /// Task status (INVALID, READY, RUNNING, ...)
     uint32_t status;
     /// copy of the stack pointer before a context switch
-    size_t* stack;
+    size_t* last_stack_pointer;
+    /// start address of the stack
+    void* stack;
     /// Additional status flags. For instance, to signalize the using of the FPU
     uint8_t flags;
     /// Task priority
@@ -47,8 +47,8 @@
  * A task's id will be its position in this array.
  */
 static task_t task_table[MAX_TASKS] = { \
-    [0] = {0, TASK_IDLE, NULL, 0, 0, 0, NULL, NULL, 0, ATOMIC_INIT(0), SPINLOCK_INIT, NULL, SPINLOCK_INIT, NULL, NULL, 0, 0, 0, 0}, \
-    [1 ... MAX_TASKS-1] = {0, TASK_INVALID, NULL, 0, 0, 0, NULL, NULL, 0, ATOMIC_INIT(0), SPINLOCK_INIT, NULL, SPINLOCK_INIT, NULL, NULL, 0, 0, 0, 0}};
+    [0] = {0, TASK_IDLE, NULL, NULL, 0, 0, 0, NULL, NULL, 0, ATOMIC_INIT(0), SPINLOCK_INIT, NULL, SPINLOCK_INIT, NULL, NULL, 0, 0, 0, 0}, \
+    [1 ... MAX_TASKS-1] = {0, TASK_INVALID, NULL, NULL, 0, 0, 0, NULL, NULL, 0, ATOMIC_INIT(0), SPINLOCK_INIT, NULL, SPINLOCK_INIT, NULL, NULL, 0, 0, 0, 0}};
 static spinlock_irqsave_t table_lock = SPINLOCK_IRQSAVE_INIT;
 #ifndef CONFIG_TICKLESS
 #if MAX_CORES > 1
@@ -75,6 +75,7 @@ extern atomic_int32_t cpu_online;
 #endif
 
 DEFINE_PER_CORE(task_t*, current_task, task_table+0);
+extern const void boot_stack;
 
 /** @brief helper function for the assembly code to determine the current task
  * @return Pointer to the task_t structure of current task
@@ -106,6 +107,7 @@ int multitasking_init(void) {
     task_table[0].pgd = get_boot_pgd();
     task_table[0].flags = TASK_DEFAULT_FLAGS;
     task_table[0].prio = IDLE_PRIO;
+    task_table[0].stack = (void*) &boot_stack;
 
     return 0;
 }
@@ -117,6 +119,8 @@ size_t get_idle_task(uint32_t id)
         return -EINVAL;
 
     task_table[id].id = id;
+    task_table[id].last_stack_pointer = NULL;
+    task_table[id].stack = (void*) ((size_t)&boot_stack + id * KERNEL_STACK_SIZE);
     task_table[id].status = TASK_IDLE;
     task_table[id].prio = IDLE_PRIO;
     task_table[id].flags = TASK_DEFAULT_FLAGS;
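
For orientation: boot_stack is the static MAX_CORES*KERNEL_STACK_SIZE region defined in the assembly hunks above, so each core's idle task simply receives its own slice of it — stack(id) = &boot_stack + id * KERNEL_STACK_SIZE — and, as with every other stack in this commit, the usable top is stack(id) + KERNEL_STACK_SIZE - 16 to keep the stack 16-byte aligned (see the next hunk).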
@@ -128,7 +132,7 @@ size_t get_idle_task(uint32_t id)
     current_task[id].var = task_table+id;
     runqueues[id].idle = task_table+id;
 
-    return get_stack(id);
+    return (size_t) task_table[id].stack + KERNEL_STACK_SIZE - 16;
 #else
     return -EINVAL;
 #endif
@@ -142,18 +146,25 @@ void finish_task_switch(void)
 
     spinlock_irqsave_lock(&runqueues[core_id].lock);
     if ((old = runqueues[core_id].old_task) != NULL) {
-        prio = old->prio;
-        if (!runqueues[core_id].queue[prio-1].first) {
-            old->next = old->prev = NULL;
-            runqueues[core_id].queue[prio-1].first = runqueues[core_id].queue[prio-1].last = old;
+        if (old->status == TASK_INVALID) {
+            destroy_stack(old);
+            old->stack = NULL;
+            old->last_stack_pointer = NULL;
+            runqueues[core_id].old_task = NULL;
         } else {
-            old->next = NULL;
-            old->prev = runqueues[core_id].queue[prio-1].last;
-            runqueues[core_id].queue[prio-1].last->next = old;
-            runqueues[core_id].queue[prio-1].last = old;
+            prio = old->prio;
+            if (!runqueues[core_id].queue[prio-1].first) {
+                old->next = old->prev = NULL;
+                runqueues[core_id].queue[prio-1].first = runqueues[core_id].queue[prio-1].last = old;
+            } else {
+                old->next = NULL;
+                old->prev = runqueues[core_id].queue[prio-1].last;
+                runqueues[core_id].queue[prio-1].last->next = old;
+                runqueues[core_id].queue[prio-1].last = old;
+            }
+            runqueues[core_id].old_task = NULL;
+            runqueues[core_id].prio_bitmap |= (1 << prio);
         }
-        runqueues[core_id].old_task = NULL;
-        runqueues[core_id].prio_bitmap |= (1 << prio);
     }
     spinlock_irqsave_unlock(&runqueues[core_id].lock);
 }
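
The reshuffling above is the heart of the commit: a finished task cannot free the stack it is still running on, so the free is deferred to the next context switch. A rough timeline of the handoff, assembled from this hunk and the scheduler() hunk further below (comments only, not verbatim kernel code):

    /* scheduler()            still runs on the finished task's stack;
     *                        marks it: runqueues[core].old_task = curr_task
     *                        (status TASK_INVALID)
     * <context switch>       the CPU now runs on the next task's stack
     * finish_task_switch()   safe point: the old stack is no longer in use,
     *                        so destroy_stack(old) and clear old->stack /
     *                        old->last_stack_pointer
     */

Note that a TASK_INVALID task is consequently no longer re-queued; only tasks that are still alive go back into their priority queue.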
@@ -302,6 +313,8 @@ static int create_task(tid_t* id, entry_point_t ep, void* arg, uint8_t prio, uin
 
     task_table[i].id = i;
     task_table[i].status = TASK_READY;
+    task_table[i].last_stack_pointer = NULL;
+    task_table[i].stack = create_stack();
     task_table[i].flags = TASK_DEFAULT_FLAGS;
     task_table[i].prio = prio;
     task_table[i].last_core = 0;
@@ -372,6 +385,9 @@ int sys_fork(void)
     }
 
     task_table[i].id = i;
+    task_table[i].last_stack_pointer = NULL;
+    task_table[i].stack = create_stack();
+
     spinlock_init(&task_table[i].vma_lock);
 
     // copy VMA list
@@ -1347,13 +1363,14 @@ size_t** scheduler(void)
     orig_task = curr_task = per_core(current_task);
     curr_task->last_core = core_id;
 
-    /* signalizes that this task could be reused */
-    if (curr_task->status == TASK_FINISHED)
-        curr_task->status = TASK_INVALID;
-
     spinlock_irqsave_lock(&runqueues[core_id].lock);
 
-    runqueues[core_id].old_task = NULL; // reset old task
+    /* signalizes that this task could be reused */
+    if (curr_task->status == TASK_FINISHED) {
+        curr_task->status = TASK_INVALID;
+        runqueues[core_id].old_task = curr_task;
+    } else runqueues[core_id].old_task = NULL; // reset old task
 
     prio = msb(runqueues[core_id].prio_bitmap); // determines highest priority
 #ifndef CONFIG_TICKLESS
 #if MAX_CORES > 1
@@ -1409,7 +1426,7 @@ get_task_out:
 
     //kprintf("schedule from %u to %u with prio %u on core %u\n", orig_task->id, curr_task->id, (uint32_t)curr_task->prio, CORE_ID);
 
-        return (size_t**) &(orig_task->stack);
+        return (size_t**) &(orig_task->last_stack_pointer);
     }
 
     return NULL;
mm/memory.c
@@ -380,3 +380,18 @@ void kfree(void* addr, size_t sz)
     atomic_int32_sub(&total_allocated_pages, npages);
     atomic_int32_add(&total_available_pages, npages);
 }
+
+void* create_stack(void)
+{
+    return kmalloc(KERNEL_STACK_SIZE);
+}
+
+int destroy_stack(task_t* task)
+{
+    if (BUILTIN_EXPECT(!task || !task->stack, 0))
+        return -EINVAL;
+
+    kfree(task->stack, KERNEL_STACK_SIZE);
+
+    return 0;
+}
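
A short note on the trade-off, with illustrative numbers (the kernel's real constants may differ): assuming KERNEL_STACK_SIZE = 8 KiB and MAX_TASKS = 16, the old static kstacks array pinned 16 * 8 KiB = 128 KiB for the whole uptime, whereas now only the idle stacks stay static (MAX_CORES * 8 KiB in boot_stack) and each additional live task costs one 8 KiB allocation that is returned on exit. The two-argument kfree(addr, sz) already requires callers to remember allocation sizes, which destroy_stack() trivially can, since every stack has the same fixed size.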