implemented userspace task loading and userspace heap management with the new vma_list

Steffen Vogel 2014-01-09 14:06:09 +01:00
parent a00177ec09
commit 11977e40bc
4 changed files with 90 additions and 88 deletions
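The diff below drops the old start_heap/end_heap bookkeeping in favour of a per-task vma_t that describes the userspace heap: sys_sbrk() only moves heap->end, and the page-fault handler backs touched heap pages with zero-filled frames on demand. The following stand-alone C sketch illustrates that scheme in simplified form; vma_t is reduced to a start/end pair and get_frame() is a hypothetical stand-in for the kernel's get_page()/map_region() path, so this is an illustration of the idea, not the kernel's actual code.

/*
 * Simplified, self-contained sketch of the on-demand heap mapping
 * introduced here -- not the kernel's actual code. The heap is one VMA;
 * sbrk only grows its end, and a fault inside [start, end) is resolved
 * by backing the page with a zero-filled frame.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

typedef struct vma {                    /* reduced stand-in for the kernel's vma_t */
	size_t start;
	size_t end;
} vma_t;

/* hypothetical stand-in for get_page(): hand out one page-aligned frame */
static void *get_frame(void)
{
	return aligned_alloc(PAGE_SIZE, PAGE_SIZE);
}

/* grow (or shrink) the heap VMA; no pages are mapped here */
static size_t sbrk_sketch(vma_t *heap, intptr_t incr)
{
	size_t old_end = heap->end;

	heap->end += incr;
	if (heap->end < heap->start)    /* never shrink below the start */
		heap->end = heap->start;

	return old_end;
}

/* page-fault path: back the faulting heap page with a zeroed frame */
static int fault_sketch(const vma_t *heap, size_t viraddr)
{
	if (viraddr < heap->start || viraddr >= heap->end)
		return -1;              /* not a heap access */

	size_t page = viraddr & PAGE_MASK;  /* page that actually faulted */
	void *frame = get_frame();
	if (!frame)
		return -1;              /* out of memory */

	/* in the kernel, map_region() would map 'frame' at 'page';
	 * the sketch only zeroes the frame, as the fault handler does */
	(void) page;
	memset(frame, 0x00, PAGE_SIZE);
	return 0;
}

int main(void)
{
	vma_t heap = { .start = 0x400000, .end = 0x400000 };

	sbrk_sketch(&heap, 2 * PAGE_SIZE);
	printf("heap: %#zx - %#zx\n", heap.start, heap.end);
	return fault_sketch(&heap, heap.start + 8) ? 1 : 0;
}

Keeping sbrk() as pure bookkeeping means no frames are spent on heap space the task never touches; the cost is one page fault on the first access to each new heap page.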


@@ -53,6 +53,7 @@ page_map_t* get_boot_page_map(void)
return &boot_pml4;
}
// TODO
size_t virt_to_phys(size_t viraddr) {
task_t* task = per_core(current_task);
@@ -600,28 +601,25 @@ static void pagefault_handler(struct state *s)
task_t* task = per_core(current_task);
size_t viraddr = read_cr2();
#if 0
if ((viraddr >= task->start_heap) && (viraddr <= task->end_heap) && (viraddr > KERNEL_SPACE)) {
viraddr = viraddr & PAGE_MASK;
phyaddr = get_page();
if (BUILTIN_EXPECT(!phyaddr, 0))
goto oom;
if (map_region(viraddr, phyaddr, 1, MAP_USER_SPACE) == viraddr) {
memset((void*) viraddr, 0x00, PAGE_SIZE);
return;
}
kprintf("Could not map 0x%x at 0x%x\n", phyaddr, viraddr);
put_page(phyaddr);
}
#endif
// on demand userspace heap mapping
if ((task->heap) && (viraddr >= task->heap->start) && (viraddr < task->heap->end)) {
viraddr &= PAGE_MASK;
size_t phyaddr = get_page();
if (BUILTIN_EXPECT(!phyaddr, 0)) {
kprintf("out of memory: task = %u\n", task->id);
goto default_handler;
}
viraddr = map_region(viraddr, phyaddr, 1, MAP_USER_SPACE);
if (BUILTIN_EXPECT(!viraddr, 0)) {
kprintf("map_region: could not map %#lx to %#lx, task = %u\n", viraddr, phyaddr, task->id);
put_page(phyaddr);
goto default_handler;
}
memset((void*) viraddr, 0x00, PAGE_SIZE); // fill with zeros
return;
}


@@ -104,6 +104,8 @@ typedef struct task {
size_t start_heap;
/// end address of the heap
size_t end_heap;
/// the userspace heap
vma_t* heap;
/// LwIP error code
int lwip_err;
/// mail inbox


@@ -236,7 +236,7 @@ static int sys_accept(int s, struct sockaddr* addr, socklen_t* addrlen)
curr_task->fildes_table[fd]->offset = sock2;
curr_task->fildes_table[fd]->count = 1;
curr_task->fildes_table[fd]->node = findnode_fs("/dev/socket");
/* file doesn't exist! */
if (curr_task->fildes_table[fd]->node == NULL) {
/* tidy up the file descriptor */
@@ -381,7 +381,7 @@ static int sys_dup2(int fd, int fd2)
/* If fd and fd2 are equal, then dup2() just returns fd2 */
if (fd == fd2)
return fd2;
/*
* if descriptor fd2 is already in use, it is first deallocated
* as if a close(2) call had been done first
@@ -398,29 +398,25 @@ static int sys_dup2(int fd, int fd2)
static int sys_sbrk(int incr)
{
task_t* task = per_core(current_task);
vma_t* tmp = NULL;
vma_t* heap = task->heap;
int ret;
spinlock_lock(&task->vma_lock);
// search vma containing the heap
tmp = task->vma_list;
while(tmp && !((task->end_heap >= tmp->start) && (task->end_heap <= tmp->end)))
tmp = tmp->next;
if (BUILTIN_EXPECT(!heap,0 )) {
kprintf("sys_sbrk: missing heap!\n");
abort();
}
ret = (int) task->end_heap;
task->end_heap += incr;
if (task->end_heap < task->start_heap)
task->end_heap = task->start_heap;
// resize virtual memory area
if (tmp && (tmp->end <= task->end_heap))
tmp->end = task->end_heap;
ret = heap->end;
heap->end += incr;
if (heap->end < heap->start)
heap->end = heap->start;
// allocation and mapping of new pages for the heap
// is caught by the pagefault handler
//kprintf("sys_sbrk: tid=%d, start_heap=%8x, end_heap=%8x, incr=%4x\n", task->id, task->start_heap, task->end_heap, incr);
kprintf("sys_sbrk: task = %d, heap->start = %#lx, heap->end = %#lx, incr = %#4x\n", task->id, heap->start, heap->end, incr); // TOD0: remove
spinlock_unlock(&task->vma_lock);
@@ -506,7 +502,7 @@ int syscall_handler(size_t sys_nr, ...)
break;
case __NR_wait: {
int32_t* status = va_arg(vl, int32_t*);
ret = wait(status);
break;
}
@@ -555,7 +551,7 @@ int syscall_handler(size_t sys_nr, ...)
ret = -ENOTSOCK;
break;
}
//kprintf("lwip_connect: %p with lenght %i and Socket %i", name, namelen, per_core(current_task)->fildes_table[fd].offset);
//kprintf("lwip_connect: %p with lenght %i and Socket %i", name, namelen, per_core(current_task)->fildes_table[fd].offset); // TODO: remove
ret = lwip_connect(per_core(current_task)->fildes_table[fd]->offset, name, namelen);


@@ -586,16 +586,15 @@ static int load_task(load_args_t* largs)
{
uint32_t i, offset, idx, fd_i;
uint32_t addr, npages, flags;
size_t stack = 0;
size_t stack = 0, heap = 0;
elf_header_t header;
elf_program_header_t prog_header;
//elf_section_header_t sec_header;
///!!! kfree is missing!
fildes_t *file = kmalloc(sizeof(fildes_t));
fildes_t *file = kmalloc(sizeof(fildes_t)); // TODO: kfree is missing!
file->offset = 0;
file->flags = 0;
//TODO: init the whole fildes_t struct!
// TODO: init the whole fildes_t struct!
task_t* curr_task = per_core(current_task);
int err;
@@ -606,22 +605,22 @@ static int load_task(load_args_t* largs)
if (!file->node)
return -EINVAL;
/* init fildes_table */
// init fildes_table
spinlock_irqsave_lock(&table_lock);
if (!task_table[curr_task->id].fildes_table) {
task_table[curr_task->id].fildes_table = kmalloc(sizeof(filp_t)*NR_OPEN);
if (BUILTIN_EXPECT(!task_table[curr_task->id].fildes_table, 0)) {
if (!curr_task->fildes_table) {
curr_task->fildes_table = kmalloc(sizeof(filp_t)*NR_OPEN);
if (BUILTIN_EXPECT(!curr_task->fildes_table, 0)) {
spinlock_irqsave_unlock(&table_lock);
return -ENOMEM;
}
memset(task_table[curr_task->id].fildes_table, 0x00, sizeof(filp_t)*NR_OPEN);
memset(curr_task->fildes_table, 0x00, sizeof(filp_t)*NR_OPEN);
for (fd_i = 0; fd_i < 3; fd_i++) {
task_table[curr_task->id].fildes_table[fd_i] = kmalloc(sizeof(fildes_t));
task_table[curr_task->id].fildes_table[fd_i]->count = 1;
curr_task->fildes_table[fd_i] = kmalloc(sizeof(fildes_t));
curr_task->fildes_table[fd_i]->count = 1;
}
task_table[curr_task->id].fildes_table[0]->node = findnode_fs("/dev/stdin");
task_table[curr_task->id].fildes_table[1]->node = findnode_fs("/dev/stdout");
task_table[curr_task->id].fildes_table[2]->node = findnode_fs("/dev/stderr");
curr_task->fildes_table[0]->node = findnode_fs("/dev/stdin");
curr_task->fildes_table[1]->node = findnode_fs("/dev/stdout");
curr_task->fildes_table[2]->node = findnode_fs("/dev/stderr");
}
spinlock_irqsave_unlock(&table_lock);
@@ -642,43 +641,43 @@ static int load_task(load_args_t* largs)
if (BUILTIN_EXPECT(header.ident._class != ELF_CLASS_32, 0))
goto invalid;
#else
#elif defined(CONFIG_X86_64)
if (BUILTIN_EXPECT(header.machine != ELF_EM_X86_64, 0))
goto invalid;
if (BUILTIN_EXPECT(header.ident._class != ELF_CLASS_64, 0))
goto invalid;
#else
#error "unknown arch"
#endif
if (BUILTIN_EXPECT(header.ident.data != ELF_DATA_2LSB, 0))
goto invalid;
if (header.entry <= KERNEL_SPACE)
if (header.entry < KERNEL_SPACE)
goto invalid;
// interpret program header table
for (i=0; i<header.ph_entry_count; i++) {
file->offset = header.ph_offset+i*header.ph_entry_size;
if (read_fs(file, (uint8_t*)&prog_header, sizeof(elf_program_header_t)) == 0) {
if (read_fs(file, (uint8_t*) &prog_header, sizeof(elf_program_header_t)) == 0) {
kprintf("Could not read programm header!\n");
continue;
}
switch(prog_header.type)
{
switch(prog_header.type) {
case ELF_PT_LOAD: // load program segment
if (!prog_header.virt_addr)
continue;
npages = (prog_header.mem_size >> PAGE_SHIFT);
if (prog_header.mem_size & (PAGE_SIZE-1))
npages++;
npages = PAGE_FLOOR(prog_header.mem_size) >> PAGE_BITS;
addr = get_pages(npages);
flags = MAP_USER_SPACE;
if (prog_header.flags & PF_X)
flags |= MAP_CODE;
if (!(prog_header.flags & PF_W))
flags |= MAP_READ_ONLY;
// map page frames in the address space of the current task
if (!map_region(prog_header.virt_addr, addr, npages, flags)) {
@@ -687,35 +686,30 @@ static int load_task(load_args_t* largs)
}
// clear pages
memset((void*) prog_header.virt_addr, 0x00, npages*PAGE_SIZE);
memset((void*) prog_header.virt_addr, 0x00, npages * PAGE_SIZE);
// set starting point of the heap
if (curr_task->start_heap < prog_header.virt_addr+prog_header.mem_size)
curr_task->start_heap = curr_task->end_heap = prog_header.virt_addr+prog_header.mem_size;
// update heap location
if (heap < prog_header.virt_addr + prog_header.mem_size)
heap = prog_header.virt_addr+prog_header.mem_size;
// load program
file->offset = prog_header.offset;
read_fs(file, (uint8_t*)prog_header.virt_addr, prog_header.file_size);
read_fs(file, (uint8_t*) prog_header.virt_addr, prog_header.file_size);
flags = VMA_CACHEABLE;
flags = VMA_CACHEABLE | VMA_USER;
if (prog_header.flags & PF_R)
flags |= VMA_READ;
if (prog_header.flags & PF_W)
flags |= VMA_WRITE;
if (prog_header.flags & PF_X)
flags |= VMA_EXECUTE;
vma_add(prog_header.virt_addr, prog_header.virt_addr+npages*PAGE_SIZE-1, flags);
if (!(prog_header.flags & PF_W))
change_page_permissions(prog_header.virt_addr, prog_header.virt_addr+npages*PAGE_SIZE-1, flags);
vma_add(prog_header.virt_addr, prog_header.virt_addr+npages*PAGE_SIZE, flags);
break;
case ELF_PT_GNU_STACK: // Indicates stack executability
// create user-level stack
npages = DEFAULT_STACK_SIZE >> PAGE_SHIFT;
if (DEFAULT_STACK_SIZE & (PAGE_SIZE-1))
npages++;
npages = PAGE_FLOOR(DEFAULT_STACK_SIZE) >> PAGE_BITS;
addr = get_pages(npages);
stack = header.entry*2; // virtual address of the stack
@@ -733,7 +727,8 @@ static int load_task(load_args_t* largs)
flags |= VMA_WRITE;
if (prog_header.flags & PF_X)
flags |= VMA_EXECUTE;
vma_add(stack, stack+npages*PAGE_SIZE-1, flags);
vma_add(stack, stack+npages*PAGE_SIZE, flags);
break;
}
}
@@ -751,8 +746,23 @@ static int load_task(load_args_t* largs)
}
#endif
// setup heap
if (!curr_task->heap)
curr_task->heap = (vma_t*) kmalloc(sizeof(vma_t));
if (BUILTIN_EXPECT(!curr_task->heap || !heap, 0)) {
kprintf("load_task: heap is missing!\n");
return -ENOMEM;
}
curr_task->heap->flags = VMA_HEAP|VMA_USER;
curr_task->heap->start = heap;
curr_task->heap->end = heap;
// TODO: insert into list
if (BUILTIN_EXPECT(!stack, 0)) {
kprintf("Stack is missing!\n");
kprintf("load_task: stack is missing!\n");
return -ENOMEM;
}
@@ -765,9 +775,9 @@ static int load_task(load_args_t* largs)
// push argv on the stack
offset -= largs->argc * sizeof(char*);
for(i=0; i<largs->argc; i++) {
for (i=0; i<largs->argc; i++) {
((char**) (stack+offset))[i] = (char*) (stack+idx);
while(((char*) stack)[idx] != '\0')
idx++;
idx++;
@@ -775,7 +785,7 @@ static int load_task(load_args_t* largs)
// push env on the stack
offset -= (largs->envc+1) * sizeof(char*);
for(i=0; i<largs->envc; i++) {
for (i=0; i<largs->envc; i++) {
((char**) (stack+offset))[i] = (char*) (stack+idx);
while(((char*) stack)[idx] != '\0')
@@ -796,7 +806,7 @@ static int load_task(load_args_t* largs)
*((char***) (stack+offset)) = (char**) (stack + offset + 2*sizeof(char**) + (largs->envc+1) * sizeof(char*));
// push argc on the stack
offset -= sizeof(int);
offset -= sizeof(size_t);
*((int*) (stack+offset)) = largs->argc;
kfree(largs);
@@ -810,12 +820,12 @@ static int load_task(load_args_t* largs)
invalid:
kprintf("Invalid executable!\n");
kprintf("magic number 0x%x\n", (uint32_t) header.ident.magic);
kprintf("header type 0x%x\n", (uint32_t) header.type);
kprintf("machine type 0x%x\n", (uint32_t) header.machine);
kprintf("elf ident class 0x%x\n", (uint32_t) header.ident._class);
kprintf("elf identdata !0x%x\n", header.ident.data);
kprintf("program entry point 0x%x\n", (size_t) header.entry);
kprintf("Magic number: 0x%x\n", (uint32_t) header.ident.magic);
kprintf("Header type: 0x%x\n", (uint32_t) header.type);
kprintf("Machine type: 0x%x\n", (uint32_t) header.machine);
kprintf("ELF ident class: 0x%x\n", (uint32_t) header.ident._class);
kprintf("ELF ident data: 0x%x\n", header.ident.data);
kprintf("Program entry point: 0x%x\n", (size_t) header.entry);
return -EINVAL;
}
@@ -849,7 +859,6 @@ static int user_entry(void* arg)
*/
int create_user_task_on_core(tid_t* id, const char* fname, char** argv, uint32_t core_id)
{
#ifdef CONFIG_X86_32
vfs_node_t* node;
int argc = 0;
size_t i, buffer_size = 0;
@@ -885,11 +894,8 @@ int create_user_task_on_core(tid_t* id, const char* fname, char** argv, uint32_t
while ((*dest++ = *src++) != 0);
}
/* create new task */
// create new task
return create_task(id, user_entry, load_args, NORMAL_PRIO, core_id);
#else
return -EINVAL;
#endif
}
/** @brief Used by the execve system call */