add basic support for sbrk and fix a bug in saving the FPU context

Stefan Lankes 2011-04-20 21:23:22 +02:00
parent 8a515c9925
commit faa41e25e2
7 changed files with 113 additions and 34 deletions

View file

@@ -169,13 +169,16 @@ static void fpu_handler(struct state *s)
{
task_t* task = per_core(current_task);
kputs("got FPU exception\n");
asm volatile ("clts"); // clear the TS flag of cr0
if (!task->fpu_used) {
task->fpu_used = 1;
if (!(task->flags & TASK_FPU_INIT)) {
// first use of the FPU => initialize it
asm volatile ("finit");
} else
task->flags = task->flags|TASK_FPU_INIT|TASK_FPU_USED;
} else {
// restore the FPU context
asm volatile ("frstor %0" :: "m"(task->fpu.fsave)); // restore fpu state
task->flags |= TASK_FPU_USED;
}
}
/** @brief Exception messages
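
Since the rendering above has dropped the diff's +/- markers, the old fpu_used lines and the new flags-based lines appear interleaved. As a reading aid, here is a hedged, stand-alone sketch of the control flow the reworked handler plausibly implements: on the first #NM trap the FPU is initialized and the task is marked TASK_FPU_INIT, on every later trap the saved context is restored, and in both cases TASK_FPU_USED is set so the scheduler knows the context must be written back at the next switch. init_fpu() and restore_fpu() are stand-ins for the privileged finit/frstor instructions, so the sketch runs in user space.

/* Hedged sketch of the lazy-FPU flag logic; the privileged clts/finit/frstor
 * instructions are replaced by stubs so the control flow can run on its own. */
#include <stdint.h>
#include <stdio.h>

#define TASK_FPU_INIT (1 << 0)  /* FPU context has been initialized once */
#define TASK_FPU_USED (1 << 1)  /* FPU touched since the last context switch */

typedef struct { uint32_t flags; } task_t;

static void init_fpu(void)    { puts("finit  (first use: fresh FPU state)"); }
static void restore_fpu(void) { puts("frstor (restore previously saved state)"); }

/* Models fpu_handler(): called on the #NM trap after clearing CR0.TS. */
static void fpu_trap(task_t *task)
{
        if (!(task->flags & TASK_FPU_INIT)) {
                init_fpu();
                task->flags |= TASK_FPU_INIT | TASK_FPU_USED;
        } else {
                restore_fpu();
                task->flags |= TASK_FPU_USED;
        }
}

int main(void)
{
        task_t task = { .flags = 0 };

        fpu_trap(&task);                 /* first trap: finit */
        task.flags &= ~TASK_FPU_USED;    /* scheduler saved the context */
        fpu_trap(&task);                 /* later trap: frstor */
        return 0;
}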

View file

@@ -588,7 +588,28 @@ int print_paging_tree(size_t viraddr)
static void pagefault_handler(struct state *s)
{
kprintf("PAGE FAULT: Task %u got page fault at %p (irq %d)\n", per_core(current_task)->id, read_cr2(), s->int_no);
task_t* task = per_core(current_task);
size_t viraddr = read_cr2();
size_t phyaddr;
if ((viraddr >= task->start_heap) && (viraddr <= task->end_heap) && (viraddr > KERNEL_SPACE)) {
viraddr = viraddr & 0xFFFFF000;
phyaddr = get_page();
if (BUILTIN_EXPECT(!phyaddr, 0))
goto default_handler;
if (map_region(viraddr, phyaddr, 1, MAP_USER_SPACE|MAP_HEAP) == viraddr) {
memset((void*) viraddr, 0x00, PAGE_SIZE);
return;
}
kprintf("Could not map 0x%x at 0x%x\n", phyaddr, viraddr);
put_page(phyaddr);
}
default_handler:
kprintf("PAGE FAULT: Task %u got page fault at %p (irq %d)\n", task->id, viraddr, s->int_no);
kprintf("Register state: eax = 0x%x, ebx = 0x%x, ecx = 0x%x, edx = 0x%x, edi = 0x%x, esi = 0x%x, ebp = 0x%x, esp = 0x%x\n",
s->eax, s->ebx, s->ecx, s->edx, s->edi, s->esi, s->ebp, s->esp);
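
The new page-fault path adds demand paging for the sbrk heap: a fault whose address lies between start_heap and end_heap (and above KERNEL_SPACE) is satisfied by grabbing one physical page, mapping it at the page-aligned fault address and zeroing it; any other fault falls through to the old register dump. A hedged, user-space model of that decision follows; get_page() and map_region() are stubs here, and the KERNEL_SPACE value is illustrative, not MetalSVM's real constant.

/* Hedged model of the demand-paging decision in pagefault_handler();
 * get_page()/map_region() are stubbed so the logic is runnable on its own. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE    4096
#define KERNEL_SPACE 0x400000u   /* illustrative boundary only */

static uint8_t frame[PAGE_SIZE]; /* pretend physical frame */

static size_t get_page(void)                 { return (size_t)frame; }
static size_t map_region(size_t v, size_t p) { (void)p; return v; }   /* always succeeds here */

/* Returns 1 if the fault was resolved by mapping a fresh, zeroed heap page. */
static int handle_heap_fault(size_t viraddr, size_t start_heap, size_t end_heap)
{
        if (viraddr < start_heap || viraddr > end_heap || viraddr <= KERNEL_SPACE)
                return 0;                        /* not a lazy-heap fault */

        viraddr &= ~(size_t)(PAGE_SIZE - 1);     /* align down to the page boundary */
        size_t phyaddr = get_page();
        if (!phyaddr)
                return 0;
        if (map_region(viraddr, phyaddr) != viraddr)
                return 0;
        memset(frame, 0x00, PAGE_SIZE);          /* hand out zeroed memory */
        return 1;
}

int main(void)
{
        printf("heap fault resolved: %d\n", handle_heap_fault(0x800123, 0x800000, 0x801000));
        printf("foreign fault resolved: %d\n", handle_heap_fault(0x100, 0x800000, 0x801000));
        return 0;
}

The real handler additionally releases the frame with put_page() when map_region() fails; the model collapses that case into returning 0.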

View file

@@ -47,6 +47,10 @@ extern "C" {
#define TASK_FINISHED 4
#define TASK_IDLE 5
#define TASK_DEFAULT_FLAGS 0
#define TASK_FPU_INIT (1 << 0)
#define TASK_FPU_USED (1 << 1)
typedef int (STDCALL *entry_point_t)(void*);
struct page_dir;
@@ -66,8 +70,12 @@ typedef struct task {
spinlock_t vma_lock;
/// List of VMAs
vma_t* vma_list;
/// Is set, when the FPU is used
uint32_t fpu_used;
/// Additional status flags, e.g. to signal that the task uses the FPU
uint32_t flags;
/// Start address of the heap
uint32_t start_heap;
/// End address of the heap
uint32_t end_heap;
/// Mail inbox
mailbox_wait_msg_t inbox;
/// Mail outbox array
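
The per-task bookkeeping thus changes from a single fpu_used word to a flags word holding TASK_FPU_INIT/TASK_FPU_USED plus the two heap bounds that sbrk manipulates. A minimal, hedged illustration of how such flag bits are set, tested and cleared; the struct below models only the members touched by this commit.

/* Minimal illustration of the new task_t members; everything else in the
 * real struct is omitted. */
#include <stdint.h>
#include <stdio.h>

#define TASK_DEFAULT_FLAGS 0
#define TASK_FPU_INIT (1 << 0)
#define TASK_FPU_USED (1 << 1)

typedef struct {
        uint32_t flags;       /* status bits, e.g. FPU usage          */
        uint32_t start_heap;  /* first address handed out by sbrk     */
        uint32_t end_heap;    /* current program break, grown by sbrk */
} task_t;

int main(void)
{
        task_t task = { TASK_DEFAULT_FLAGS, 0, 0 };

        task.flags |= TASK_FPU_INIT | TASK_FPU_USED;   /* set two bits at once */
        printf("fpu used: %d\n", !!(task.flags & TASK_FPU_USED));

        task.flags &= ~TASK_FPU_USED;                  /* scheduler clears it after saving */
        printf("fpu used: %d\n", !!(task.flags & TASK_FPU_USED));
        return 0;
}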

View file

@@ -22,6 +22,7 @@
#include <metalsvm/syscall.h>
#include <metalsvm/tasks.h>
#include <metalsvm/errno.h>
#include <metalsvm/spinlock.h>
static int sys_write(int fildes, const char *buf, size_t len)
{
@@ -37,6 +38,32 @@ static int sys_write(int fildes, const char *buf, size_t len)
return len;
}
static int sys_sbrk(int incr)
{
task_t* task = per_core(current_task);
vma_t* tmp = NULL;
int ret;
spinlock_lock(&task->vma_lock);
tmp = task->vma_list;
while(tmp && !((task->end_heap >= tmp->start) && (task->end_heap <= tmp->end)))
tmp = tmp->next;
ret = (int) task->end_heap;
task->end_heap += incr;
if (task->end_heap < task->start_heap)
task->end_heap = task->start_heap;
// resize virtual memory area
if (tmp && (tmp->end <= task->end_heap))
tmp->end = task->end_heap;
spinlock_unlock(&task->vma_lock);
return ret;
}
int syscall_handler(uint32_t sys_nr, ...)
{
int ret = -EINVAL;
@@ -64,6 +91,12 @@ int syscall_handler(uint32_t sys_nr, ...)
case __NR_close:
ret = 0;
break;
case __NR_sbrk: {
int incr = va_arg(vl, int);
ret = sys_sbrk(incr);
break;
}
case __NR_getpid:
ret = per_core(current_task)->id;
break;
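
sys_sbrk() only moves bookkeeping: it returns the old program break, advances end_heap by incr (clamped so it never drops below start_heap), and stretches the matching heap VMA, but it maps no physical memory; the pages arrive lazily through the page-fault handler shown earlier. A hedged user-space model of that return-old-break-then-advance behaviour, with the spinlock and VMA walk of the kernel version left out:

/* Hedged model of the sys_sbrk() bookkeeping: return the old break,
 * advance it, never drop below the start of the heap. */
#include <stdint.h>
#include <stdio.h>

typedef struct {
        uint32_t start_heap;
        uint32_t end_heap;
} task_t;

static uint32_t model_sbrk(task_t *task, int incr)
{
        uint32_t ret = task->end_heap;   /* old program break is the return value */

        task->end_heap += incr;
        if (task->end_heap < task->start_heap)
                task->end_heap = task->start_heap;   /* clamp, mirroring the kernel check */
        return ret;
}

int main(void)
{
        task_t task = { 0x800000, 0x800000 };

        uint32_t a = model_sbrk(&task, 4096);   /* returns 0x800000, break -> 0x801000 */
        uint32_t b = model_sbrk(&task, 4096);   /* returns 0x801000, break -> 0x802000 */
        printf("a=0x%x b=0x%x break=0x%x\n", a, b, task.end_heap);
        return 0;
}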

View file

@@ -48,7 +48,7 @@ DEFINE_PER_CORE(task_t*, current_task, NULL);
* A task's id will be its position in this array.
*/
static task_t task_table[MAX_TASKS] = {[0 ... MAX_TASKS-1] = {0, TASK_INVALID, ATOMIC_INIT(0), \
SPINLOCK_INIT, NULL, SPINLOCK_INIT, NULL, 0}};
SPINLOCK_INIT, NULL, SPINLOCK_INIT, NULL, 0, 0, 0}};
static spinlock_irqsave_t table_lock = SPINLOCK_IRQSAVE_INIT;
/** @brief helper function for the assembly code to determine the current task
@@ -67,7 +67,7 @@ int multitasking_init(void) {
memset(task_table[0].outbox, 0x00, sizeof(mailbox_wait_msg_t*)*MAX_TASKS);
per_core(current_task) = task_table+0;
per_core(current_task)->pgd = get_boot_pgd();
task_table[0].fpu_used = 0;
task_table[0].flags = TASK_DEFAULT_FLAGS;
return 0;
}
@@ -190,7 +190,9 @@ static int create_task(tid_t* id, entry_point_t ep, void* arg)
ret = create_default_frame(task_table+i, ep, arg);
task_table[i].fpu_used = 0;
task_table[i].flags = TASK_DEFAULT_FLAGS;
task_table[i].start_heap = 0;
task_table[i].end_heap = 0;
task_table[i].status = TASK_READY;
break;
}
@@ -252,7 +254,10 @@ int sys_fork(void)
mailbox_wait_msg_init(&task_table[i].inbox);
memset(task_table[i].outbox, 0x00, sizeof(mailbox_wait_msg_t*)*MAX_TASKS);
task_table[i].outbox[per_core(current_task)->id] = &per_core(current_task)->inbox;
task_table[i].fpu_used = 0x00;
task_table[i].flags = per_core(current_task)->flags;
memcpy(&(task_table[i].fpu), &(per_core(current_task)->fpu), sizeof(union fpu_state));
task_table[i].start_heap = 0;
task_table[i].end_heap = 0;
ret = arch_fork(task_table+i);
@@ -352,8 +357,8 @@ static int load_task(load_args_t* largs)
if (!prog_header.virt_addr)
continue;
npages = (prog_header.mem_size / PAGE_SIZE);
if (prog_header.mem_size % PAGE_SIZE)
npages = (prog_header.mem_size >> PAGE_SHIFT);
if (prog_header.mem_size & (PAGE_SIZE-1))
npages++;
addr = get_pages(npages);
@@ -369,6 +374,10 @@ static int load_task(load_args_t* largs)
// clear pages
memset((void*) prog_header.virt_addr, 0, npages*PAGE_SIZE);
// set starting point of the heap
if (per_core(current_task)->start_heap < prog_header.virt_addr+prog_header.mem_size)
per_core(current_task)->start_heap = per_core(current_task)->end_heap = prog_header.virt_addr+prog_header.mem_size;
// load program
read_fs(node, (uint8_t*)prog_header.virt_addr, prog_header.file_size, prog_header.offset);
@@ -387,8 +396,8 @@ static int load_task(load_args_t* largs)
case ELF_PT_GNU_STACK: // Indicates stack executability
// create user-level stack
npages = DEFAULT_STACK_SIZE / PAGE_SIZE;
if (DEFAULT_STACK_SIZE % PAGE_SIZE)
npages = DEFAULT_STACK_SIZE >> PAGE_SHIFT;
if (DEFAULT_STACK_SIZE & (PAGE_SIZE-1))
npages++;
addr = get_pages(npages);
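
Both npages hunks above replace a divide/modulo pair with a shift and a mask; for a power-of-two PAGE_SIZE the two forms yield the same ceiling of size / PAGE_SIZE. A small self-contained check of that equivalence, assuming the usual 4 KiB page values:

/* Quick check that the shift/mask form equals the divide/modulo form
 * for a power-of-two page size. */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define PAGE_SIZE  4096u
#define PAGE_SHIFT 12u

static size_t npages_div(size_t size)   /* old form */
{
        size_t n = size / PAGE_SIZE;
        if (size % PAGE_SIZE)
                n++;
        return n;
}

static size_t npages_shift(size_t size) /* new form */
{
        size_t n = size >> PAGE_SHIFT;
        if (size & (PAGE_SIZE - 1))
                n++;
        return n;
}

int main(void)
{
        size_t sizes[] = { 0, 1, PAGE_SIZE - 1, PAGE_SIZE, PAGE_SIZE + 1, 3 * PAGE_SIZE + 17 };
        for (size_t i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
                assert(npages_div(sizes[i]) == npages_shift(sizes[i]));
        puts("both roundings agree");
        return 0;
}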
@@ -475,6 +484,9 @@ static int load_task(load_args_t* largs)
kfree(largs, sizeof(load_args_t));
// clear fpu state
per_core(current_task)->flags &= ~(TASK_FPU_USED|TASK_FPU_INIT);
jump_to_user_code(header.entry, stack+offset);
return 0;
@@ -703,6 +715,12 @@ void scheduler(void)
if (per_core(current_task)->status == TASK_FINISHED)
per_core(current_task)->status = TASK_INVALID;
/* if the task is using the FPU, we need to save the FPU context */
if (per_core(current_task)->flags & TASK_FPU_USED) {
save_fpu_state(&(per_core(current_task)->fpu));
per_core(current_task)->flags &= ~TASK_FPU_USED;
}
for(i=1, new_id=(per_core(current_task)->id + 1) % MAX_TASKS;
i<MAX_TASKS; i++, new_id=(new_id+1) % MAX_TASKS)
{
@@ -710,10 +728,8 @@ void scheduler(void)
if (per_core(current_task)->status == TASK_RUNNING)
per_core(current_task)->status = TASK_READY;
task_table[new_id].status = TASK_RUNNING;
if (per_core(current_task)->fpu_used)
save_fpu_state(&(per_core(current_task)->fpu));
per_core(current_task) = task_table+new_id;
goto get_task_out;
}
}
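
The scheduler half of the FPU fix: the old code called save_fpu_state() inside the task-selection loop whenever fpu_used was set, and that flag was never cleared, so the context was re-saved on every switch once a task had touched the FPU. The new code saves once, before the loop, only when TASK_FPU_USED is set, and clears the bit afterwards, so the next save happens only after the task traps into fpu_handler() again. A hedged model of that save-on-switch decision, with save_fpu_state() stubbed:

/* Hedged model of the scheduler-side half of lazy FPU switching: save the
 * context only when the outgoing task used the FPU since its last slice,
 * then clear the bit so idle slices skip the save. */
#include <stdint.h>
#include <stdio.h>

#define TASK_FPU_INIT (1 << 0)
#define TASK_FPU_USED (1 << 1)

typedef struct { uint32_t flags; } task_t;

static void save_fpu_state(void) { puts("fsave: FPU context written back"); }

static void on_context_switch(task_t *outgoing)
{
        if (outgoing->flags & TASK_FPU_USED) {
                save_fpu_state();
                outgoing->flags &= ~TASK_FPU_USED;   /* re-armed by the next #NM trap */
        } else {
                puts("no FPU use since last switch: nothing to save");
        }
}

int main(void)
{
        task_t task = { TASK_FPU_INIT | TASK_FPU_USED };

        on_context_switch(&task);   /* saves and clears the bit */
        on_context_switch(&task);   /* skipped: task did not touch the FPU again */
        return 0;
}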

View file

@@ -114,6 +114,7 @@ int test_init(void)
//create_kernel_task(NULL, consumer, NULL);
//create_user_task(NULL, "/bin/hello", argv);
create_user_task(NULL, "/bin/tests", argv);
//create_user_task(NULL, "/bin/jacobi", argv);
return 0;
}

View file

@@ -22,23 +22,20 @@
#include <errno.h>
#undef errno
extern int errno;
#include "warning.h"
#include "syscall.h"
#ifndef NULL
#define NULL ((void*) 0)
#endif
void*
_DEFUN (sbrk, (incr),
int incr)
{
int ret;
void *
sbrk (incr)
int incr;
{
extern char _end; // set by linker
static char *heap_end = NULL;
char *prev_heap_end;
ret = SYSCALL1(__NR_sbrk, incr);
if (ret < 0x1000) {
errno = -ret;
ret = -1;
}
if (!heap_end)
heap_end = &_end;
prev_heap_end = heap_end;
heap_end += incr;
return (void *) prev_heap_end;
return (void*) ret;
}
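
With the +/- markers lost, the old and new bodies of the newlib stub appear merged above. The old version kept the break entirely in libc (heap_end starting at the linker symbol _end); the new version is a thin wrapper around the __NR_sbrk syscall, and return values below 0x1000, which can never be a valid break, are treated as negated errno codes. A hedged sketch of the wrapper's shape: sys_sbrk_syscall() stands in for the tree's SYSCALL1(__NR_sbrk, incr) macro, and the function is named my_sbrk() so the sketch does not collide with the host's sbrk().

/* Hedged sketch of the new sbrk() wrapper; the syscall itself is stubbed so
 * the error-translation logic can be compiled and run on its own. */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

static int sys_sbrk_syscall(int incr)
{
        static int cur_brk = 0x800000;   /* pretend kernel-side program break */
        int old = cur_brk;
        cur_brk += incr;
        return old;
}

void *my_sbrk(int incr)
{
        int ret = sys_sbrk_syscall(incr);

        /* Valid breaks live well above the first page; anything smaller is
         * interpreted as a negated errno code from the kernel. */
        if (ret < 0x1000) {
                errno = -ret;
                return (void *)(intptr_t)-1;
        }
        return (void *)(intptr_t)ret;
}

int main(void)
{
        printf("old break: %p\n", my_sbrk(4096));
        printf("new break: %p\n", my_sbrk(0));
        return 0;
}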