From c5b650fc322dc04d5252006af6c5ee3bcf86839f Mon Sep 17 00:00:00 2001
From: Stefan Lankes
Date: Thu, 24 Feb 2011 09:37:31 +0100
Subject: [PATCH] add support for the VMA list in the kernel

- after process termination, this list is used to free the allocated memory regions
---
 include/metalsvm/stdlib.h      |   1 +
 include/metalsvm/tasks_types.h |   5 +-
 kernel/tasks.c                 | 101 ++++++++++++++++++++++++++-------
 mm/memory.c                    |  12 ++--
 4 files changed, 92 insertions(+), 27 deletions(-)

diff --git a/include/metalsvm/stdlib.h b/include/metalsvm/stdlib.h
index fbc75148..d04bc9aa 100644
--- a/include/metalsvm/stdlib.h
+++ b/include/metalsvm/stdlib.h
@@ -35,6 +35,7 @@ extern "C" {
 #define MAP_STACK (1 << 4)
 #define MAP_HEAP (1 << 5)
 #define MAP_CODE (1 << 6)
+#define MAP_READONLY (1 << 7)

 void NORETURN abort(void);
 void* kmalloc(size_t);
diff --git a/include/metalsvm/tasks_types.h b/include/metalsvm/tasks_types.h
index 2e376f08..a9d63b97 100644
--- a/include/metalsvm/tasks_types.h
+++ b/include/metalsvm/tasks_types.h
@@ -21,6 +21,8 @@
 #define __TASKS_TYPES_H__

 #include <metalsvm/stddef.h>
+#include <metalsvm/spinlock_types.h>
+#include <metalsvm/vma.h>
 #include <asm/atomic.h>

 #ifdef __cplusplus
@@ -37,7 +39,6 @@ extern "C" {
 typedef int (STDCALL *entry_point_t)(void*);
 struct mailbox_int32;
 struct page_dir;
-struct spinlock;

 typedef struct task {
     tid_t id;                      /* task id = position in the task table */
@@ -45,6 +46,8 @@ typedef struct task {
     atomic_int32_t mem_usage;      /* in number of pages */
     struct spinlock* pgd_lock;     /* avoids concurrent access to the page directory */
     struct page_dir* pgd;          /* pointer to the page directory */
+    spinlock_t vma_lock;
+    vma_t* vma_list;
     struct mailbox_int32* mbox[MAX_TASKS];
 } __attribute__((packed)) task_t;

diff --git a/kernel/tasks.c b/kernel/tasks.c
index 1553282b..99af0756 100644
--- a/kernel/tasks.c
+++ b/kernel/tasks.c
@@ -32,7 +32,7 @@
 #include <metalsvm/elf.h>

 DEFINE_PER_CORE(task_t*, current_task, NULL);
-static task_t task_table[MAX_TASKS] = {[0 ... MAX_TASKS-1] = {0, TASK_INVALID, ATOMIC_INIT(0), NULL, NULL}};
+static task_t task_table[MAX_TASKS] = {[0 ... MAX_TASKS-1] = {0, TASK_INVALID, ATOMIC_INIT(0), NULL, NULL, SPINLOCK_INIT, NULL}};
 static spinlock_t table_lock = SPINLOCK_INIT;

 /*
@@ -79,11 +79,25 @@ static void wakeup_blocked_tasks(int result)
 }

 static void NORETURN do_exit(int arg) {
+    vma_t* tmp;
+
     kprintf("Terminate task: %u, return value %d\n", per_core(current_task)->id, arg);

+    //vma_dump(per_core(current_task));
+    spinlock_lock(&(per_core(current_task)->vma_lock));
+
+    // remove memory regions
+    while((tmp = per_core(current_task)->vma_list) != NULL) {
+        kfree((void*) tmp->start, tmp->end - tmp->start + 1);
+        per_core(current_task)->vma_list = tmp->next;
+        kfree((void*) tmp, sizeof(vma_t));
+    }
+
+    spinlock_unlock(&(per_core(current_task)->vma_lock));
+
     wakeup_blocked_tasks(arg);
     if (atomic_int32_read(&per_core(current_task)->mem_usage))
-        kprintf("Memory leak! Task %d did not release %d bytes\n", per_core(current_task)->id, atomic_int32_read(&per_core(current_task)->mem_usage));
+        kprintf("Memory leak! Task %d did not release %d pages\n", per_core(current_task)->id, atomic_int32_read(&per_core(current_task)->mem_usage));

     per_core(current_task)->status = TASK_FINISHED;
     reschedule();
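The cleanup loop in do_exit() above relies on the vma_t list node and on the vma_add()/vma_dump() helpers, whose definitions (presumably in include/metalsvm/vma.h) are not part of this patch. A minimal sketch of the node and of vma_add(), with field names inferred from their usage in this patch; note that end is stored inclusively, since do_exit() frees tmp->end - tmp->start + 1 bytes:

    /* Sketch of the VMA list node this patch relies on; the real
     * definition lives outside this diff. Field names follow their
     * usage in do_exit() and user_entry(). */
    typedef struct vma {
        size_t start;      /* first address of the region */
        size_t end;        /* last address of the region (inclusive) */
        uint32_t flags;    /* VMA_READ | VMA_WRITE | VMA_EXECUTE | VMA_CACHEABLE */
        struct vma* next;  /* next region; the list head is task->vma_list */
    } vma_t;

    /* Hypothetical vma_add(): prepend a region to the task's list under
     * vma_lock, mirroring how do_exit() tears the list down. */
    int vma_add(task_t* task, size_t start, size_t end, uint32_t flags)
    {
        vma_t* new_vma = (vma_t*) kmalloc(sizeof(vma_t));

        if (!new_vma)
            return -ENOMEM;

        new_vma->start = start;
        new_vma->end = end;
        new_vma->flags = flags;

        spinlock_lock(&task->vma_lock);
        new_vma->next = task->vma_list;
        task->vma_list = new_vma;
        spinlock_unlock(&task->vma_lock);

        return 0;
    }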
@@ -133,9 +147,11 @@ static int create_task(tid_t* id, entry_point_t ep, void* arg, int user)

         task_table[i].id = i;
         task_table[i].status = TASK_READY;
-        atomic_int32_set(&task_table[i].mem_usage, 0);
+        atomic_int32_set(&task_table[i].mem_usage, 0);
+        spinlock_init(&task_table[i].vma_lock);
+        task_table[i].vma_list = NULL;
         memset(task_table[i].mbox, 0x00, sizeof(mailbox_int32_t*)*MAX_TASKS);
-
+
         if (id)
             *id = i;

@@ -157,11 +173,11 @@ int create_kernel_task(tid_t* id, entry_point_t ep, void* arg)

 static int STDCALL user_entry(void* arg)
 {
-    uint32_t i, addr, npages;
+    uint32_t i, addr, npages, flags, stack = 0;
     vfs_node_t* node = (vfs_node_t*) arg;
     elf_header_t header;
     elf_program_header_t prog_header;
-    elf_section_header_t sec_header;
+    //elf_section_header_t sec_header;

     if (!node)
         return -EINVAL;
@@ -192,7 +208,9 @@ static int STDCALL user_entry(void* arg)
             continue;
         }

-        if (prog_header.type == ELF_PT_LOAD) { // load program segment
+        switch(prog_header.type)
+        {
+        case ELF_PT_LOAD: // load program segment
             if (!prog_header.virt_addr)
                 continue;

@@ -202,8 +220,12 @@ static int STDCALL user_entry(void* arg)

             addr = get_pages(npages);

+            flags = MAP_USER_SPACE;
+            if (prog_header.flags & PF_X)
+                flags |= MAP_CODE;
+
             // map page frames in the address space of the current task
-            if (!map_region(per_core(current_task), prog_header.virt_addr, addr, npages, MAP_USER_SPACE))
+            if (!map_region(per_core(current_task), prog_header.virt_addr, addr, npages, flags))
                 kprintf("Could not map 0x%x at 0x%x\n", addr, prog_header.virt_addr);

             // clear pages
@@ -211,20 +233,49 @@ static int STDCALL user_entry(void* arg)

             // load program
             read_fs(node, (uint8_t*)prog_header.virt_addr, prog_header.file_size, prog_header.offset);
-        }
+
+            flags = VMA_CACHEABLE;
+            if (prog_header.flags & PF_R)
+                flags |= VMA_READ;
+            if (prog_header.flags & PF_W)
+                flags |= VMA_WRITE;
+            if (prog_header.flags & PF_X)
+                flags |= VMA_EXECUTE;
+            vma_add(per_core(current_task), prog_header.virt_addr, prog_header.virt_addr+npages*PAGE_SIZE-1, flags);
+
+            if (!(prog_header.flags & PF_W))
+                change_page_permissions(prog_header.virt_addr, prog_header.virt_addr+npages*PAGE_SIZE-1, flags);
+            break;
+
+        case ELF_PT_GNU_STACK: // Indicates stack executability
+            // create user-level stack
+            npages = DEFAULT_STACK_SIZE / PAGE_SIZE;
+            if (DEFAULT_STACK_SIZE % PAGE_SIZE)
+                npages++;
+
+            addr = get_pages(npages);
+            stack = header.entry*2; // virtual address of the stack
+
+            if (!map_region(per_core(current_task), stack, addr, npages, MAP_USER_SPACE)) {
+                kprintf("Could not map stack at 0x%x\n", stack);
+                return -ENOMEM;
+            }
+            memset((void*) stack, 0, npages*PAGE_SIZE);
+
+            // create vma regions for the user-level stack
+            flags = VMA_CACHEABLE;
+            if (prog_header.flags & PF_R)
+                flags |= VMA_READ;
+            if (prog_header.flags & PF_W)
+                flags |= VMA_WRITE;
+            if (prog_header.flags & PF_X)
+                flags |= VMA_EXECUTE;
+            vma_add(per_core(current_task), stack, stack+npages*PAGE_SIZE-1, flags);
+            break;
+        }
     }

-    // create user-level stack
-    npages = DEFAULT_STACK_SIZE / PAGE_SIZE;
-    if (DEFAULT_STACK_SIZE % PAGE_SIZE)
-        npages++;
-    addr = get_pages(npages);
-    if (!map_region(per_core(current_task), header.entry*2, addr, npages, MAP_USER_SPACE)) {
-        kprintf("Could not map stack at 0x%x\n", header.entry*2);
-        return -ENOMEM;
-    }
-    memset((void*) (header.entry*2), 0, npages*PAGE_SIZE);
-
+#if 0
     // interpret section header table
     for (i=0; i<header.sh_entry_count; i++) {
         if (read_fs(node, (uint8_t*)&sec_header, sizeof(elf_section_header_t), header.sh_offset+i*header.sh_entry_size) == 0) {
@@ ... @@ static int STDCALL user_entry(void* arg)
             continue;
         }
     }
+#endif
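The PF_R/PF_W/PF_X to VMA_* translation appears verbatim in both the ELF_PT_LOAD and the ELF_PT_GNU_STACK case above. A follow-up cleanup could factor it into a small helper; a sketch under the same assumptions (the helper name is hypothetical, the PF_* and VMA_* constants are the ones used by the patch):

    /* Hypothetical helper: translate ELF segment permission bits (PF_*)
     * into the VMA flags expected by vma_add(). All user regions are
     * mapped cacheable. */
    static uint32_t elf_to_vma_flags(uint32_t pf_flags)
    {
        uint32_t flags = VMA_CACHEABLE;

        if (pf_flags & PF_R)
            flags |= VMA_READ;
        if (pf_flags & PF_W)
            flags |= VMA_WRITE;
        if (pf_flags & PF_X)
            flags |= VMA_EXECUTE;

        return flags;
    }

Both call sites would then collapse to vma_add(per_core(current_task), start, end, elf_to_vma_flags(prog_header.flags)).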
diff --git a/mm/memory.c b/mm/memory.c
--- a/mm/memory.c
+++ b/mm/memory.c
@@ ... @@ void kfree(void* addr, size_t sz)
-    spinlock_lock(task->pgd_lock);
-    vm_free(task, (size_t) addr, npages);
-    spinlock_unlock(task->pgd_lock);
-
     spinlock_lock(&bitmap_lock);
     for(i=0; i<npages; i++)
         page_clear_mark(index+i);
     spinlock_unlock(&bitmap_lock);

+    spinlock_lock(task->pgd_lock);
+    vm_free(task, (size_t) addr, npages);
+    spinlock_unlock(task->pgd_lock);
+
     atomic_int32_sub(&total_allocated_pages, npages);
     atomic_int32_add(&total_available_pages, npages);
     atomic_int32_sub(&(task->mem_usage), npages);
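After this change, kfree() first returns the physical page frames to the bitmap allocator, then unmaps the virtual region, and finally updates the accounting counters that do_exit() checks for leaks. A sketch of the resulting function; the prologue (argument check and index/npages computation) is assumed, only the reordered locking sequence is taken from the diff:

    /* Sketch of kfree() after this patch; surrounding code from
     * mm/memory.c is abbreviated and partly assumed. */
    void kfree(void* addr, size_t sz)
    {
        uint32_t index, npages, i;
        task_t* task = per_core(current_task);

        if (!addr || !sz)
            return;

        index = (size_t) addr / PAGE_SIZE;   /* first page frame of the region */
        npages = sz / PAGE_SIZE;             /* round the size up to whole pages */
        if (sz % PAGE_SIZE)
            npages++;

        /* release the physical page frames */
        spinlock_lock(&bitmap_lock);
        for(i=0; i<npages; i++)
            page_clear_mark(index+i);
        spinlock_unlock(&bitmap_lock);

        /* unmap the region from the task's address space */
        spinlock_lock(task->pgd_lock);
        vm_free(task, (size_t) addr, npages);
        spinlock_unlock(task->pgd_lock);

        atomic_int32_sub(&total_allocated_pages, npages);
        atomic_int32_add(&total_available_pages, npages);
        atomic_int32_sub(&(task->mem_usage), npages);
    }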