add support for the VMA list to the kernel

- after process termination, this list is used to free the task's allocated memory regions
Stefan Lankes 2011-02-24 09:37:31 +01:00
parent b4884cde2d
commit c5b650fc32
4 changed files with 92 additions and 27 deletions
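
The vma_t type that this commit wires into the task structure is defined elsewhere (in <metalsvm/vma.h>, which the header below starts to include). As a reading aid, a minimal sketch of its likely shape, inferred purely from how it is used in this diff (tmp->start, tmp->end, tmp->next in do_exit, plus the VMA_* flags passed to vma_add) — the real definition may carry additional fields:

/* Sketch only, inferred from usage in this commit. */
typedef struct vma {
	size_t start;      /* first address of the region */
	size_t end;        /* last address of the region (inclusive; note the
	                      "tmp->end - tmp->start + 1" in do_exit below) */
	uint32_t flags;    /* VMA_READ | VMA_WRITE | VMA_EXECUTE | VMA_CACHEABLE */
	struct vma* next;  /* singly-linked list anchored at task->vma_list */
} vma_t;

do_exit below walks exactly this list to return every region to the allocator once the task has terminated.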

View file

@@ -35,6 +35,7 @@ extern "C" {
 #define MAP_STACK (1 << 4)
 #define MAP_HEAP (1 << 5)
 #define MAP_CODE (1 << 6)
+#define MAP_READONLY (1 << 7)
 
 void NORETURN abort(void);
 void* kmalloc(size_t);

View file

@@ -21,6 +21,8 @@
 #define __TASKS_TYPES_H__
 
 #include <metalsvm/stddef.h>
+#include <metalsvm/vma.h>
+#include <metalsvm/spinlock_types.h>
 #include <asm/atomic.h>
 
 #ifdef __cplusplus
@@ -37,7 +39,6 @@ extern "C" {
 typedef int (STDCALL *entry_point_t)(void*);
 struct mailbox_int32;
 struct page_dir;
-struct spinlock;
 
 typedef struct task {
 	tid_t id; /* task id = position in the task table */
@@ -45,6 +46,8 @@ typedef struct task {
 	atomic_int32_t mem_usage; /* in number of pages */
 	struct spinlock* pgd_lock; /* avoids concurrent access to the page directory */
 	struct page_dir* pgd; /* pointer to the page directory */
+	spinlock_t vma_lock;
+	vma_t* vma_list;
 	struct mailbox_int32* mbox[MAX_TASKS];
 } __attribute__((packed)) task_t;

View file

@@ -32,7 +32,7 @@
 #include <asm/elf.h>
 
 DEFINE_PER_CORE(task_t*, current_task, NULL);
-static task_t task_table[MAX_TASKS] = {[0 ... MAX_TASKS-1] = {0, TASK_INVALID, ATOMIC_INIT(0), NULL, NULL}};
+static task_t task_table[MAX_TASKS] = {[0 ... MAX_TASKS-1] = {0, TASK_INVALID, ATOMIC_INIT(0), NULL, NULL, SPINLOCK_INIT, NULL}};
 static spinlock_t table_lock = SPINLOCK_INIT;
 
 /*
@@ -79,11 +79,25 @@ static void wakeup_blocked_tasks(int result)
 }
 
 static void NORETURN do_exit(int arg) {
+	vma_t* tmp;
 
 	kprintf("Terminate task: %u, return value %d\n", per_core(current_task)->id, arg);
+	//vma_dump(per_core(current_task));
+
+	spinlock_lock(&(per_core(current_task)->vma_lock));
+
+	// remove memory regions
+	while((tmp = per_core(current_task)->vma_list) != NULL) {
+		kfree((void*) tmp->start, tmp->end - tmp->start + 1);
+		per_core(current_task)->vma_list = tmp->next;
+		kfree((void*) tmp, sizeof(vma_t));
+	}
+
+	spinlock_unlock(&(per_core(current_task)->vma_lock));
 
 	wakeup_blocked_tasks(arg);
 
 	if (atomic_int32_read(&per_core(current_task)->mem_usage))
-		kprintf("Memory leak! Task %d did not release %d bytes\n", per_core(current_task)->id, atomic_int32_read(&per_core(current_task)->mem_usage));
+		kprintf("Memory leak! Task %d did not release %d pages\n", per_core(current_task)->id, atomic_int32_read(&per_core(current_task)->mem_usage));
 
 	per_core(current_task)->status = TASK_FINISHED;
 	reschedule();
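
A note on the cleanup loop just added to do_exit: kfree() in this kernel takes the size of the block as its second argument (compare the kmalloc(size_t) declaration in the first file), and the region bounds are inclusive. An annotated copy of the loop, with comments added here for clarity:

	while ((tmp = per_core(current_task)->vma_list) != NULL) {
		/* release the memory region itself; end is inclusive, hence the +1 */
		kfree((void*) tmp->start, tmp->end - tmp->start + 1);
		/* unlink the node first, then free it, so tmp->next is never
		 * read from already-freed memory */
		per_core(current_task)->vma_list = tmp->next;
		kfree((void*) tmp, sizeof(vma_t));
	}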
@@ -133,9 +147,11 @@ static int create_task(tid_t* id, entry_point_t ep, void* arg, int user)
 		task_table[i].id = i;
 		task_table[i].status = TASK_READY;
 		atomic_int32_set(&task_table[i].mem_usage, 0);
+		spinlock_init(&task_table[i].vma_lock);
+		task_table[i].vma_list = NULL;
 		memset(task_table[i].mbox, 0x00, sizeof(mailbox_int32_t*)*MAX_TASKS);
 
 		if (id)
 			*id = i;
@@ -157,11 +173,11 @@ int create_kernel_task(tid_t* id, entry_point_t ep, void* arg)
 
 static int STDCALL user_entry(void* arg)
 {
-	uint32_t i, addr, npages;
+	uint32_t i, addr, npages, flags, stack = 0;
 	vfs_node_t* node = (vfs_node_t*) arg;
 	elf_header_t header;
 	elf_program_header_t prog_header;
-	elf_section_header_t sec_header;
+	//elf_section_header_t sec_header;
 
 	if (!node)
 		return -EINVAL;
@@ -192,7 +208,9 @@ static int STDCALL user_entry(void* arg)
 			continue;
 		}
 
-		if (prog_header.type == ELF_PT_LOAD) { // load program segment
+		switch(prog_header.type)
+		{
+		case ELF_PT_LOAD: // load program segment
 			if (!prog_header.virt_addr)
 				continue;
@@ -202,8 +220,12 @@ static int STDCALL user_entry(void* arg)
 			addr = get_pages(npages);
 
+			flags = MAP_USER_SPACE;
+			if (prog_header.flags & PF_X)
+				flags |= MAP_CODE;
+
 			// map page frames in the address space of the current task
-			if (!map_region(per_core(current_task), prog_header.virt_addr, addr, npages, MAP_USER_SPACE))
+			if (!map_region(per_core(current_task), prog_header.virt_addr, addr, npages, flags))
 				kprintf("Could not map 0x%x at 0x%x\n", addr, prog_header.virt_addr);
 
 			// clear pages
@@ -211,20 +233,49 @@ static int STDCALL user_entry(void* arg)
 
 			// load program
 			read_fs(node, (uint8_t*)prog_header.virt_addr, prog_header.file_size, prog_header.offset);
-		}
+
+			flags = VMA_CACHEABLE;
+			if (prog_header.flags & PF_R)
+				flags |= VMA_READ;
+			if (prog_header.flags & PF_W)
+				flags |= VMA_WRITE;
+			if (prog_header.flags & PF_X)
+				flags |= VMA_EXECUTE;
+			vma_add(per_core(current_task), prog_header.virt_addr, prog_header.virt_addr+npages*PAGE_SIZE-1, flags);
+
+			if (!(prog_header.flags & PF_W))
+				change_page_permissions(prog_header.virt_addr, prog_header.virt_addr+npages*PAGE_SIZE-1, flags);
+			break;
+
+		case ELF_PT_GNU_STACK: // Indicates stack executability
+			// create user-level stack
+			npages = DEFAULT_STACK_SIZE / PAGE_SIZE;
+			if (DEFAULT_STACK_SIZE % PAGE_SIZE)
+				npages++;
+			addr = get_pages(npages);
+			stack = header.entry*2; // virtual address of the stack
+
+			if (!map_region(per_core(current_task), stack, addr, npages, MAP_USER_SPACE)) {
+				kprintf("Could not map stack at 0x%x\n", stack);
+				return -ENOMEM;
+			}
+			memset((void*) stack, 0, npages*PAGE_SIZE);
+
+			// create vma regions for the user-level stack
+			flags = VMA_CACHEABLE;
+			if (prog_header.flags & PF_R)
+				flags |= VMA_READ;
+			if (prog_header.flags & PF_W)
+				flags |= VMA_WRITE;
+			if (prog_header.flags & PF_X)
+				flags |= VMA_EXECUTE;
+			vma_add(per_core(current_task), stack, stack+npages*PAGE_SIZE-1, flags);
+			break;
+		}
 	}
 
-	// create user-level stack
-	npages = DEFAULT_STACK_SIZE / PAGE_SIZE;
-	if (DEFAULT_STACK_SIZE % PAGE_SIZE)
-		npages++;
-	addr = get_pages(npages);
-	if (!map_region(per_core(current_task), header.entry*2, addr, npages, MAP_USER_SPACE)) {
-		kprintf("Could not map stack at 0x%x\n", header.entry*2);
-		return -ENOMEM;
-	}
-	memset((void*) (header.entry*2), 0, npages*PAGE_SIZE);
-
+#if 0
 	// interpret section header table
 	for (i=0; i<header.sh_entry_count; i++) {
 		if (read_fs(node, (uint8_t*)&sec_header, sizeof(elf_section_header_t), header.sh_offset+i*header.sh_entry_size) == 0) {
@@ -232,10 +283,16 @@ static int STDCALL user_entry(void* arg)
 			continue;
 		}
 
-		// TODO: set page permissions
+		// TODO: interpret section header
 	}
+#endif
 
+	if (BUILTIN_EXPECT(!stack, 0)) {
+		kprintf("Stack is missing!\n");
+		return -ENOMEM;
+	}
 
-	jump_to_user_code(header.entry, header.entry*2+npages*PAGE_SIZE-64);
+	jump_to_user_code(header.entry, stack+DEFAULT_STACK_SIZE-64);
 
 	return 0;

View file

@@ -277,18 +277,22 @@ static void task_free(task_t* task, void* addr, size_t sz)
 		npages++;
 
-	spinlock_lock(task->pgd_lock);
-	vm_free(task, (size_t) addr, npages);
-	spinlock_unlock(task->pgd_lock);
-
 	spinlock_lock(&bitmap_lock);
 	for(i=0; i<npages; i++) {
 		phyaddr = virt_to_phys(task, (size_t) addr+i*PAGE_SIZE);
+		if (!phyaddr)
+			continue;
+
 		index = phyaddr / PAGE_SIZE;
 		page_clear_mark(index);
 	}
 	spinlock_unlock(&bitmap_lock);
 
+	spinlock_lock(task->pgd_lock);
+	vm_free(task, (size_t) addr, npages);
+	spinlock_unlock(task->pgd_lock);
+
 	atomic_int32_sub(&total_allocated_pages, npages);
 	atomic_int32_add(&total_available_pages, npages);
 	atomic_int32_sub(&(task->mem_usage), npages);
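
The subtle part of this last hunk is the reordering: virt_to_phys() walks the task's page tables, so it can only yield valid frame addresses while the mappings still exist. A sketch of the resulting task_free sequence, simplified and with the rationale added as comments:

	/* 1. Release the physical frames while the virtual-to-physical
	 *    mappings are still intact. */
	spinlock_lock(&bitmap_lock);
	for (i = 0; i < npages; i++) {
		phyaddr = virt_to_phys(task, (size_t) addr + i*PAGE_SIZE);
		if (!phyaddr)            /* page was never mapped: nothing to clear */
			continue;
		page_clear_mark(phyaddr / PAGE_SIZE);
	}
	spinlock_unlock(&bitmap_lock);

	/* 2. Only now tear down the virtual mappings. In the old order,
	 *    vm_free() ran first, virt_to_phys() could no longer resolve
	 *    the addresses, and the frames leaked. */
	spinlock_lock(task->pgd_lock);
	vm_free(task, (size_t) addr, npages);
	spinlock_unlock(task->pgd_lock);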