Merge branch 'vma_kernel' into vogel

Conflicts:
	kernel/tasks.c
Steffen Vogel 2013-11-20 14:00:04 +01:00
commit a972efe288
6 changed files with 443 additions and 285 deletions


@@ -23,11 +23,13 @@
#include <metalsvm/mmu.h>
#include <metalsvm/time.h>
#include <metalsvm/tasks.h>
#include <metalsvm/vma.h>
#include <asm/page.h>
#include <asm/processor.h>
#define PAGE_COUNT 10
#define SIZE (PAGE_COUNT*PAGE_SIZE)
#define VIRT_FROM_ADDR 0x100000000000
#define VIRT_TO_ADDR 0x200000000000
@@ -168,11 +170,55 @@ static void paging(void)
//sleep(3);
}
/** @brief Test of the VMA allocator */
static void vma(void)
{
int ret;
// vma_alloc
size_t a1 = vma_alloc(SIZE, VMA_HEAP);
test(a1, "vma_alloc(0x%x, 0x%x) = 0x%lx", SIZE, VMA_HEAP, a1);
vma_dump();
size_t a2 = vma_alloc(SIZE, VMA_HEAP|VMA_USER);
test(a2 != 0, "vma_alloc(0x%x, 0x%x) = 0x%lx", SIZE, VMA_HEAP|VMA_USER, a2);
vma_dump();
// vma_add
ret = vma_add(VIRT_FROM_ADDR, VIRT_FROM_ADDR+SIZE, VMA_HEAP|VMA_USER);
test(ret >= 0, "vma_add(0x%lx, 0x%lx, 0x%x) = %u", VIRT_FROM_ADDR, VIRT_FROM_ADDR+SIZE, VMA_HEAP|VMA_USER, ret);
vma_dump();
ret = vma_add(VIRT_FROM_ADDR+SIZE, VIRT_FROM_ADDR+2*SIZE, VMA_HEAP|VMA_USER);
test(ret >= 0, "vma_add(0x%lx, 0x%lx, 0x%x) = %u", VIRT_FROM_ADDR+SIZE, VIRT_FROM_ADDR+2*SIZE, VMA_HEAP|VMA_USER, ret);
vma_dump();
ret = vma_add(VIRT_FROM_ADDR-SIZE, VIRT_FROM_ADDR, VMA_HEAP|VMA_USER);
test(ret >= 0, "vma_add(0x%lx, 0x%lx, 0x%x) = %u", VIRT_FROM_ADDR-SIZE, VIRT_FROM_ADDR, VMA_HEAP|VMA_USER, ret);
vma_dump();
// vma_free
ret = vma_free(VIRT_FROM_ADDR-SIZE, VIRT_FROM_ADDR);
test(ret >= 0, "vma_free(0x%lx, 0x%lx) = %u", VIRT_FROM_ADDR-SIZE, VIRT_FROM_ADDR, ret);
vma_dump();
ret = vma_free(VIRT_FROM_ADDR+SIZE, VIRT_FROM_ADDR+2*SIZE);
test(ret >= 0, "vma_free(0x%lx, 0x%lx) = %u", VIRT_FROM_ADDR+SIZE, VIRT_FROM_ADDR+2*SIZE, ret);
vma_dump();
ret = vma_free(VIRT_FROM_ADDR, VIRT_FROM_ADDR+SIZE);
test(ret >= 0, "vma_free(0x%lx, 0x%lx) = %u", VIRT_FROM_ADDR, VIRT_FROM_ADDR+SIZE, ret);
vma_dump();
}
/** @brief This is a simple procedure to test memory management subsystem */
int memory(void* arg)
{
kprintf("======== PAGING: test started...\n");
paging();
kprintf("======== VMA: test started...\n");
vma();
kprintf("======== All tests finished successfull...\n");


@@ -405,91 +405,6 @@ int change_page_permissions(size_t start, size_t end, uint32_t flags)
return -EINVAL;
}
/*
* Use the first fit algorithm to find a valid address range
*
* TODO: O(n) => bad performance, we need a better approach
*/
size_t vm_alloc(uint32_t npages, uint32_t flags)
{
task_t* task = per_core(current_task);
size_t viraddr, i, j, ret = 0;
size_t start, end;
page_map_t* pdpt, * pgd, * pgt;
uint16_t index_pml4, index_pdpt;
uint16_t index_pgd, index_pgt;
if (BUILTIN_EXPECT(!task || !task->page_map || !paging_enabled, 0))
return 0;
if (flags & MAP_KERNEL_SPACE) {
start = (((size_t) &kernel_end) + 10*PAGE_SIZE) & PAGE_MASK;
end = (KERNEL_SPACE - PAGE_SIZE) & PAGE_MASK;
} else {
start = KERNEL_SPACE & PAGE_MASK;
end = PAGE_MASK;
}
if (BUILTIN_EXPECT(!npages, 0))
return 0;
if (flags & MAP_KERNEL_SPACE)
spinlock_lock(&kslock);
else
spinlock_irqsave_lock(&task->page_lock);
viraddr = i = start;
j = 0;
do {
index_pml4 = (viraddr >> 39) & 0x1FF;
index_pdpt = (viraddr >> 30) & 0x1FF;
index_pgd = (viraddr >> 21) & 0x1FF;
index_pgt = (viraddr >> 12) & 0x1FF;
// Currently, we allocate pages only in kernel space.
// => physical address of the page table is identical to the virtual address
pdpt = (page_map_t*) (task->page_map->entries[index_pml4] & PAGE_MASK);
if (!pdpt) {
i += (size_t)PAGE_MAP_ENTRIES*PAGE_MAP_ENTRIES*PAGE_MAP_ENTRIES*PAGE_SIZE;
j += PAGE_MAP_ENTRIES*PAGE_MAP_ENTRIES*PAGE_MAP_ENTRIES;
continue;
}
pgd = (page_map_t*) (pdpt->entries[index_pdpt] & PAGE_MASK);
if (!pgd) {
i += PAGE_MAP_ENTRIES*PAGE_MAP_ENTRIES*PAGE_SIZE;
j += PAGE_MAP_ENTRIES*PAGE_MAP_ENTRIES;
continue;
}
pgt = (page_map_t*) (pgd->entries[index_pgd] & PAGE_MASK);
if (!pgt) {
i += PAGE_MAP_ENTRIES*PAGE_SIZE;
j += PAGE_MAP_ENTRIES;
continue;
}
if (!(pgt->entries[index_pgt])) {
i += PAGE_SIZE;
j++;
} else {
// restart search
j = 0;
viraddr = i + PAGE_SIZE;
i = i + PAGE_SIZE;
}
} while((j < npages) && (i<=end));
if ((j >= npages) && (viraddr < end))
ret = viraddr;
if (flags & MAP_KERNEL_SPACE)
spinlock_unlock(&kslock);
else
spinlock_irqsave_unlock(&task->page_lock);
return ret;
}
int unmap_region(size_t viraddr, uint32_t npages)
{
@@ -558,70 +473,6 @@ int unmap_region(size_t viraddr, uint32_t npages)
return 0;
}
int vm_free(size_t viraddr, uint32_t npages)
{
task_t* task = per_core(current_task);
page_map_t* pdpt, * pgd, * pgt;
size_t i;
uint16_t index_pml4, index_pdpt;
uint16_t index_pgd, index_pgt;
if (BUILTIN_EXPECT(!task || !task->page_map || !paging_enabled, 0))
return -EINVAL;
if (viraddr <= KERNEL_SPACE)
spinlock_lock(&kslock);
else
spinlock_irqsave_lock(&task->page_lock);
i = 0;
while(i<npages)
{
index_pml4 = (viraddr >> 39) & 0x1FF;
index_pdpt = (viraddr >> 30) & 0x1FF;
index_pgd = (viraddr >> 21) & 0x1FF;
index_pgt = (viraddr >> 12) & 0x1FF;
// Currently, we allocate pages only in kernel space.
// => physical address of the page table is identical to the virtual address
pdpt = (page_map_t*) (task->page_map->entries[index_pml4] & PAGE_MASK);
if (!pdpt) {
viraddr += (size_t) PAGE_MAP_ENTRIES*PAGE_MAP_ENTRIES*PAGE_MAP_ENTRIES*PAGE_SIZE;
i += PAGE_MAP_ENTRIES*PAGE_MAP_ENTRIES*PAGE_MAP_ENTRIES;
continue;
}
pgd = (page_map_t*) (pdpt->entries[index_pdpt] & PAGE_MASK);
if (!pgd) {
viraddr += PAGE_MAP_ENTRIES*PAGE_MAP_ENTRIES*PAGE_SIZE;
i += PAGE_MAP_ENTRIES*PAGE_MAP_ENTRIES;
continue;
}
pgt = (page_map_t*) (pgd->entries[index_pgd] & PAGE_MASK);
if (!pgt) {
viraddr += PAGE_MAP_ENTRIES*PAGE_SIZE;
i += PAGE_MAP_ENTRIES;
continue;
}
if (pgt->entries[index_pgt])
pgt->entries[index_pgt] = 0;
viraddr +=PAGE_SIZE;
i++;
tlb_flush_one_page(viraddr);
}
if (viraddr <= KERNEL_SPACE)
spinlock_unlock(&kslock);
else
spinlock_irqsave_unlock(&task->page_lock);
return 0;
}
static void pagefault_handler(struct state *s)
{
task_t* task = per_core(current_task);


@@ -27,56 +27,102 @@
#define __VMA_H__
#include <metalsvm/stddef.h>
#include <asm/page.h>
#ifdef __cplusplus
extern "C" {
#endif
/// Read access to this VMA is allowed
#define VMA_READ (1 << 0)
/// Write access to this VMA is allowed
#define VMA_WRITE (1 << 1)
/// Instruction fetches in this VMA are allowed
#define VMA_EXECUTE (1 << 2)
/// This VMA is cacheable
#define VMA_CACHEABLE (1 << 3)
#define VMA_NOACCESS (1 << 4)
/// This VMA is not accessible
#define VMA_NO_ACCESS (1 << 4)
/// This VMA should be part of the userspace
#define VMA_USER (1 << 5)
/// A collection of flags used for the kernel heap (kmalloc)
#define VMA_HEAP (VMA_READ|VMA_WRITE|VMA_CACHEABLE)
// boundaries for VAS allocation
extern const void kernel_end;
//#define VMA_KERN_MIN (((size_t) &kernel_end + PAGE_SIZE) & PAGE_MASK)
#define VMA_KERN_MAX KERNEL_SPACE
#define VMA_USER_MAX (1UL << 47) // TODO
struct vma;
/** @brief VMA structure definition */
/** @brief VMA structure definition
*
* Each item in this linked list marks a used part of the virtual address space.
* It's used by vma_alloc() to find holes between them.
*/
typedef struct vma {
/// Start address of the memory area
size_t start;
/// End address of the memory area
size_t end;
/// Type flags field
uint32_t type;
uint32_t flags;
/// Pointer of next VMA element in the list
struct vma* next;
/// Pointer to previous VMA element in the list
struct vma* prev;
} vma_t;
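The list is anchored at its topmost area: next points toward higher addresses, prev toward lower ones, so all traversals below walk prev. A minimal sketch of such a walk (the helper print_gaps is hypothetical, purely to illustrate the scan that vma_alloc() performs):

#include <metalsvm/stdio.h>
#include <metalsvm/vma.h>

/* Hypothetical helper: walk from the topmost VMA downwards via the prev
 * pointers and print every hole between two neighbouring areas. */
static void print_gaps(vma_t* list)
{
	vma_t* vma;

	for (vma = list; vma && vma->prev; vma = vma->prev) {
		if (vma->prev->end < vma->start)
			kprintf("gap: 0x%lx - 0x%lx (0x%lx bytes)\n",
			        vma->prev->end, vma->start,
			        vma->start - vma->prev->end);
	}
}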
/** @brief Add a new virtual memory region to the list of VMAs
/** @brief Add a new virtual memory area to the list of VMAs
*
* @param task Pointer to the task_t structure of the task
* @param start Start address of the new region
* @param end End address of the new region
* @param type Type flags the new region shall have
* @param start Start address of the new area
* @param end End address of the new area
* @param flags Type flags the new area shall have
*
* @return
* - 0 on success
* - -EINVAL (-22) or -ENOMEM (-12) on failure
*/
int vma_add(struct task* task, size_t start, size_t end, uint32_t type);
int vma_add(size_t start, size_t end, uint32_t flags);
/** @brief Dump information about this task's VMAs into the terminal.
/** @brief Search for a free memory area
*
* This will print out Start, end and flags for each VMA in the task's list
* @param size Size of the requested VMA in bytes
* @param flags Type flags the new area shall have
* @return
* - the start address of a free area
* - 0 on failure
*/
size_t vma_alloc(size_t size, uint32_t flags);
/** @brief Free an allocated memory area
*
* @param task The task's task_t structure
* @param start Start address of the area to be freed
* @param end End address of the area to be freed
* @return
* - 0 on success
* - -EINVAL (-22) on failure
*/
int vma_dump(struct task* task);
int vma_free(size_t start, size_t end);
/** @brief Free all virtual memory areas
*
* @return
* - 0 on success
*/
int drop_vma_list();
/** @brief Copy the VMA list of the current task to task
*
* @param task The task where the list should be copied to
* @return
* - 0 on success
*/
int copy_vma_list(struct task* task);
/** @brief Dump information about this task's VMAs into the terminal. */
void vma_dump();
#ifdef __cplusplus
}


@@ -196,7 +196,6 @@ static void wakeup_blocked_tasks(int result)
/** @brief A procedure to be called by procedures which are called by exiting tasks. */
static void NORETURN do_exit(int arg) {
vma_t* tmp;
task_t* curr_task = per_core(current_task);
uint32_t flags, core_id, fd, status;
@@ -230,18 +229,7 @@ static void NORETURN do_exit(int arg) {
wakeup_blocked_tasks(arg);
//vma_dump(curr_task);
spinlock_lock(&curr_task->vma_lock);
// remove memory regions
while((tmp = curr_task->vma_list) != NULL) {
kfree((void*) tmp->start, tmp->end - tmp->start + 1);
curr_task->vma_list = tmp->next;
kfree((void*) tmp, sizeof(vma_t));
}
spinlock_unlock(&curr_task->vma_lock);
drop_vma_list(); // kfree virtual memory areas and the vma_list
drop_page_map(); // delete page directory and its page tables
#if 0
@@ -262,9 +250,7 @@ static void NORETURN do_exit(int arg) {
reschedule();
kprintf("Kernel panic: scheduler on core %d found no valid task\n", CORE_ID);
while(1) {
HALT;
}
while(1) HALT;
}
/** @brief A procedure to be called by kernel tasks */
@@ -330,7 +316,7 @@ static int create_task(tid_t* id, entry_point_t ep, void* arg, uint8_t prio, uin
ret = create_page_map(task_table+i, 0);
if (ret < 0) {
ret = -ENOMEM;
goto create_task_out;
goto out;
}
task_table[i].id = i;
@@ -376,7 +362,7 @@ static int create_task(tid_t* id, entry_point_t ep, void* arg, uint8_t prio, uin
}
}
create_task_out:
out:
spinlock_irqsave_unlock(&table_lock);
return ret;
@@ -387,11 +373,7 @@ int sys_fork(void)
int ret = -ENOMEM;
unsigned int i, core_id, fd_i;
task_t* parent_task = per_core(current_task);
vma_t** child;
vma_t* parent;
vma_t* tmp;
spinlock_lock(&parent_task->vma_lock);
spinlock_irqsave_lock(&table_lock);
core_id = CORE_ID;
@@ -403,43 +385,26 @@ int sys_fork(void)
ret = create_page_map(task_table+i, 1);
if (ret < 0) {
ret = -ENOMEM;
goto create_task_out;
goto out;
}
ret = copy_vma_list(child_task);
if (BUILTIN_EXPECT(ret < 0, 0)) {
ret = -ENOMEM;
goto out;
}
task_table[i].id = i;
task_table[i].last_stack_pointer = NULL;
task_table[i].stack = create_stack();
spinlock_init(&task_table[i].vma_lock);
// init fildes_table
// copy VMA list
child = &task_table[i].vma_list;
parent = parent_task->vma_list;
tmp = NULL;
while(parent) {
*child = (vma_t*) kmalloc(sizeof(vma_t));
if (BUILTIN_EXPECT(!child, 0))
break;
(*child)->start = parent->start;
(*child)->end = parent->end;
(*child)->type = parent->type;
(*child)->prev = tmp;
(*child)->next = NULL;
parent = parent->next;
tmp = *child;
child = &((*child)->next);
}
task_table[i].fildes_table = kmalloc(sizeof(filp_t)*NR_OPEN);
memcpy(task_table[i].fildes_table, parent_task->fildes_table, sizeof(filp_t)*NR_OPEN);
for (fd_i = 0; fd_i < NR_OPEN; fd_i++)
for (fd_i = 0; fd_i < NR_OPEN; fd_i++) {
if ((task_table[i].fildes_table[fd_i]) != NULL)
task_table[i].fildes_table[fd_i]->count++;
}
mailbox_wait_msg_init(&task_table[i].inbox);
memset(task_table[i].outbox, 0x00, sizeof(mailbox_wait_msg_t*)*MAX_TASKS);
@@ -487,9 +452,8 @@ int sys_fork(void)
}
}
create_task_out:
out:
spinlock_irqsave_unlock(&table_lock);
spinlock_unlock(&parent_task->vma_lock);
return ret;
}
@@ -679,7 +643,7 @@ static int load_task(load_args_t* largs)
flags |= VMA_WRITE;
if (prog_header.flags & PF_X)
flags |= VMA_EXECUTE;
vma_add(curr_task, prog_header.virt_addr, prog_header.virt_addr+npages*PAGE_SIZE-1, flags);
vma_add(prog_header.virt_addr, prog_header.virt_addr+npages*PAGE_SIZE-1, flags);
if (!(prog_header.flags & PF_W))
change_page_permissions(prog_header.virt_addr, prog_header.virt_addr+npages*PAGE_SIZE-1, flags);
@@ -708,7 +672,7 @@ static int load_task(load_args_t* largs)
flags |= VMA_WRITE;
if (prog_header.flags & PF_X)
flags |= VMA_EXECUTE;
vma_add(curr_task, stack, stack+npages*PAGE_SIZE-1, flags);
vma_add(stack, stack+npages*PAGE_SIZE-1, flags);
break;
}
}
@@ -871,13 +835,11 @@ int create_user_task_on_core(tid_t* id, const char* fname, char** argv, uint32_t
int sys_execve(const char* fname, char** argv, char** env)
{
vfs_node_t* node;
vma_t* tmp;
size_t i, buffer_size = 0;
load_args_t* load_args = NULL;
char *dest, *src;
int ret, argc = 0;
int envc = 0;
task_t* curr_task = per_core(current_task);
node = findnode_fs((char*) fname);
if (!node || !(node->type == FS_FILE))
@@ -920,16 +882,8 @@ int sys_execve(const char* fname, char** argv, char** env)
while ((*dest++ = *src++) != 0);
}
spinlock_lock(&curr_task->vma_lock);
// remove old program
while((tmp = curr_task->vma_list) != NULL) {
kfree((void*) tmp->start, tmp->end - tmp->start + 1);
curr_task->vma_list = tmp->next;
kfree((void*) tmp, sizeof(vma_t));
}
spinlock_unlock(&curr_task->vma_lock);
drop_vma_list();
/*
* we use a trap gate to enter the kernel


@@ -272,18 +272,44 @@ int mmu_init(void)
return ret;
}
// add kernel to VMA list
vma_add((size_t) &kernel_start & PAGE_MASK,
PAGE_ALIGN((size_t) &kernel_end),
VMA_READ|VMA_WRITE|VMA_EXECUTE|VMA_CACHEABLE);
// add LAPIC to VMA list
vma_add((size_t) &kernel_start - PAGE_SIZE,
(size_t) &kernel_start,
VMA_READ|VMA_WRITE);
#if MAX_CORES > 1
// reserve page for SMP boot code
vma_add(SMP_SETUP_ADDR & PAGE_MASK,
PAGE_ALIGN(SMP_SETUP_ADDR + PAGE_SIZE),
VMA_READ|VMA_WRITE|VMA_EXECUTE|VMA_CACHEABLE);
#endif
#ifdef CONFIG_MULTIBOOT
/*
* Modules like the init ram disk are already loaded.
* Therefore, we set these pages as used.
*/
if (mb_info) {
vma_add((size_t) mb_info & PAGE_MASK,
PAGE_ALIGN((size_t) mb_info + sizeof(multiboot_info_t)),
VMA_READ|VMA_CACHEABLE);
if (mb_info->flags & MULTIBOOT_INFO_MODS) {
multiboot_module_t* mmodule = (multiboot_module_t*) ((size_t) mb_info->mods_addr);
vma_add((size_t) mb_info->mods_addr & PAGE_MASK,
PAGE_ALIGN((size_t) mb_info->mods_addr + mb_info->mods_count*sizeof(multiboot_module_t)),
VMA_READ|VMA_CACHEABLE);
for(i=0; i<mb_info->mods_count; i++) {
vma_add(PAGE_ALIGN(mmodule[i].mod_start),
PAGE_ALIGN(mmodule[i].mod_end),
VMA_READ|VMA_WRITE|VMA_CACHEABLE);
for(addr=mmodule[i].mod_start; addr<mmodule[i].mod_end; addr+=PAGE_SIZE) {
page_set_mark(addr >> PAGE_SHIFT);

mm/vma.c

@@ -1,5 +1,5 @@
/*
* Copyright 2011 Stefan Lankes, Chair for Operating Systems,
* Copyright 2011 Steffen Vogel, Chair for Operating Systems,
* RWTH Aachen University
*
* Licensed under the Apache License, Version 2.0 (the "License");
@@ -17,87 +17,322 @@
* This file is part of MetalSVM.
*/
#include <metalsvm/vma.h>
#include <metalsvm/stdlib.h>
#include <metalsvm/stdio.h>
#include <metalsvm/tasks_types.h>
#include <metalsvm/spinlock.h>
#include <metalsvm/vma.h>
#include <metalsvm/errno.h>
/*
* add a new virtual memory region to the list of VMAs
* Kernel space VMA list and lock
*
* For bootstrapping we initialize the VMA list with one empty VMA
* (start == end) and expand this VMA by calls to vma_alloc()
*/
int vma_add(task_t* task, size_t start, size_t end, uint32_t type)
static vma_t vma_boot = { VMA_KERN_MAX, VMA_KERN_MAX, VMA_HEAP };
static vma_t* vma_list = &vma_boot;
static spinlock_t vma_lock = SPINLOCK_INIT;
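Because vma_boot is empty (start == end) and carries the VMA_HEAP flags, the first matching request takes the "resize existing vma" branch of vma_alloc() below and simply lowers its start. A hedged sketch of that bootstrap behaviour, assuming a fresh kernel list with no other areas registered yet:

// Illustration only (kernel context, PAGE_SIZE from <asm/page.h>):
//   initial list:   [VMA_KERN_MAX, VMA_KERN_MAX)             <- empty vma_boot
//   after 1st call: [VMA_KERN_MAX - PAGE_SIZE, VMA_KERN_MAX)
//   after 2nd call: [VMA_KERN_MAX - 2*PAGE_SIZE, VMA_KERN_MAX)
size_t a = vma_alloc(PAGE_SIZE, VMA_HEAP);  // == VMA_KERN_MAX - PAGE_SIZE
size_t b = vma_alloc(PAGE_SIZE, VMA_HEAP);  // == VMA_KERN_MAX - 2*PAGE_SIZE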
size_t vma_alloc(size_t size, uint32_t flags)
{
vma_t* new_vma;
if (BUILTIN_EXPECT(!task || start > end, 0))
task_t* task = per_core(current_task);
spinlock_t* lock;
vma_t** list;
size_t ret = 0;
kprintf("vma_alloc(0x%lx, 0x%x)\n", size, flags);
size_t base, limit; // boundaries for search
size_t start, end;
if (BUILTIN_EXPECT(!size, 0))
return 0;
if (flags & VMA_USER) {
base = VMA_KERN_MAX;
limit = VMA_USER_MAX;
list = &task->vma_list;
lock = &task->vma_lock;
}
else {
base = 0;
limit = VMA_KERN_MAX;
list = &vma_list;
lock = &vma_lock;
}
spinlock_lock(lock);
// "last" fit search for free memory area
vma_t* pred = *list; // vma before current gap
vma_t* succ = NULL; // vma after current gap
do {
start = (pred) ? pred->end : base;
end = (succ) ? succ->start : limit;
if (end > start && end - start > size)
break; // we found a gap
succ = pred;
pred = (pred) ? pred->prev : NULL;
} while (pred || succ);
if (BUILTIN_EXPECT(end > limit || end < start || end - start < size, 0)) {
spinlock_unlock(lock);
return 0;
}
// resize existing vma
if (succ && succ->flags == flags) {
succ->start -= size;
ret = succ->start;
}
// insert new vma
else {
vma_t* new = kmalloc(sizeof(vma_t));
if (BUILTIN_EXPECT(!new, 0)) {
spinlock_unlock(lock);
return 0;
}
new->start = end-size;
new->end = end;
new->flags = flags;
new->next = succ;
new->prev = pred;
if (pred)
pred->next = new;
if (succ)
succ->prev = new;
else
*list = new;
ret = new->start;
}
spinlock_unlock(lock);
return ret;
}
int vma_free(size_t start, size_t end)
{
task_t* task = per_core(current_task);
spinlock_t* lock;
vma_t* vma;
vma_t** list;
if (BUILTIN_EXPECT(start >= end, 0))
return -EINVAL;
new_vma = kmalloc(sizeof(new_vma));
if (!new_vma)
return -ENOMEM;
if (end <= VMA_KERN_MAX) {
lock = &vma_lock;
list = &vma_list;
}
else if (start >= VMA_KERN_MAX) {
lock = &task->vma_lock;
list = &task->vma_list;
}
else
return -EINVAL;
if (BUILTIN_EXPECT(!*list, 0))
return -EINVAL;
spinlock_lock(lock);
// search vma
vma = *list;
while (vma) {
if (start >= vma->start && end <= vma->end) break;
vma = vma->prev;
}
if (BUILTIN_EXPECT(!vma, 0)) {
spinlock_unlock(lock);
return -EINVAL;
}
// free/resize vma
if (start == vma->start && end == vma->end) {
if (vma == *list)
*list = vma->prev; // update list head (the head is the topmost area)
if (vma->prev)
vma->prev->next = vma->next;
if (vma->next)
vma->next->prev = vma->prev;
kfree(vma);
}
else if (start == vma->start)
vma->start = end;
else if (end == vma->end)
vma->end = start;
else {
vma_t* new = kmalloc(sizeof(vma_t));
if (BUILTIN_EXPECT(!new, 0)) {
spinlock_unlock(lock);
return -ENOMEM;
}
// copy the upper part into the new element before truncating the old one
new->start = end;
new->end = vma->end;
new->flags = vma->flags;
new->next = vma->next;
new->prev = vma;
if (vma->next)
vma->next->prev = new;
if (vma == *list)
*list = new; // keep the list head at the topmost area
vma->end = start;
vma->next = new;
}
spinlock_unlock(lock);
return 0;
}
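A hedged walk through the branches above, reusing the VIRT_FROM_ADDR and SIZE test constants from the first file (userspace addresses, so the per-task list is used):

vma_add(VIRT_FROM_ADDR, VIRT_FROM_ADDR + 4*SIZE, VMA_HEAP|VMA_USER); // one area
vma_free(VIRT_FROM_ADDR, VIRT_FROM_ADDR + SIZE);            // start matches: area shrinks
vma_free(VIRT_FROM_ADDR + 2*SIZE, VIRT_FROM_ADDR + 3*SIZE); // inner slice: area is split in two
vma_free(VIRT_FROM_ADDR + SIZE, VIRT_FROM_ADDR + 2*SIZE);   // exact match: element is kfree()d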
int vma_add(size_t start, size_t end, uint32_t flags)
{
task_t* task = per_core(current_task);
spinlock_t* lock;
vma_t** list;
kprintf("vma_add(0x%lx, 0x%lx, 0x%x)\n", start, end, flags);
if (BUILTIN_EXPECT(start >= end, 0))
return -EINVAL;
if (flags & VMA_USER) {
list = &task->vma_list;
lock = &task->vma_lock;
// check if address is in userspace
if (BUILTIN_EXPECT(start < VMA_KERN_MAX, 0))
return -EINVAL;
}
else {
list = &vma_list;
lock = &vma_lock;
// check if address is in kernelspace
if (BUILTIN_EXPECT(end > VMA_KERN_MAX, 0))
return -EINVAL;
}
spinlock_lock(lock);
// search gap
vma_t* pred = *list;
vma_t* succ = NULL;
while (pred) {
if ((!pred || pred->end <= start) &&
(!succ || succ->start >= end))
break;
succ = pred;
pred = pred->prev;
}
// resize existing vma
if (pred && pred->end == start && pred->flags == flags)
pred->end = end;
else if (succ && succ->start == end && succ->flags == flags)
succ->start = start;
// insert new vma
else {
vma_t* new = kmalloc(sizeof(vma_t));
if (BUILTIN_EXPECT(!new, 0)) {
spinlock_unlock(lock);
return -ENOMEM;
}
new->start = start;
new->end = end;
new->flags = flags;
new->next = succ;
new->prev = pred;
if (pred)
pred->next = new;
if (succ)
succ->prev = new;
else
*list = new;
}
spinlock_unlock(lock);
return 0;
}
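The two resize branches are exactly what the memory test in the first file exercises: adjacent vma_add() calls with identical flags collapse into one list element instead of allocating a new one. A short sketch using the same test constants:

// The second call finds pred->end == start with matching flags and only
// extends pred->end; vma_dump() afterwards shows a single merged area.
vma_add(VIRT_FROM_ADDR, VIRT_FROM_ADDR + SIZE, VMA_HEAP|VMA_USER);
vma_add(VIRT_FROM_ADDR + SIZE, VIRT_FROM_ADDR + 2*SIZE, VMA_HEAP|VMA_USER);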
int copy_vma_list(task_t* task)
{
task_t* parent_task = per_core(current_task);
spinlock_init(&task->vma_lock);
spinlock_lock(&parent_task->vma_lock);
spinlock_lock(&task->vma_lock);
int ret = 0;
vma_t* last = NULL;
vma_t* parent = parent_task->vma_list;
while (parent) {
vma_t *new = kmalloc(sizeof(vma_t));
if (BUILTIN_EXPECT(!new, 0)) {
ret = -ENOMEM;
goto out;
}
new->start = parent->start;
new->end = parent->end;
new->flags = parent->flags;
new->next = last; // walking downwards: the previously copied area lies above
new->prev = NULL;
if (last)
last->prev = new;
else
task->vma_list = new; // the first copy is the topmost area and thus the head
last = new;
parent = parent->prev; // the head is the topmost area, so walk via prev
}
out:
spinlock_unlock(&task->vma_lock);
spinlock_unlock(&parent_task->vma_lock);
return ret;
}
int drop_vma_list()
{
task_t* task = per_core(current_task);
spinlock_lock(&task->vma_lock);
new_vma->start = start;
new_vma->end = end;
new_vma->type = type;
if (!(task->vma_list)) {
new_vma->next = new_vma->prev = NULL;
task->vma_list = new_vma;
} else {
vma_t* tmp = task->vma_list;
while (tmp->next && tmp->start < start)
tmp = tmp->next;
new_vma->next = tmp->next;
new_vma->prev = tmp;
tmp->next = new_vma;
}
vma_t* vma;
while ((vma = task->vma_list) != NULL) {
task->vma_list = vma->prev; // the head is the topmost area
pfree((void*) vma->start, vma->end - vma->start);
kfree(vma);
}
spinlock_unlock(&task->vma_lock);
return 0;
}
int vma_dump(task_t* task)
void vma_dump()
{
vma_t* tmp;
if (BUILTIN_EXPECT(!task, 0))
return -EINVAL;
spinlock_lock(&task->vma_lock);
int cnt = 0;
tmp = task->vma_list;
while (tmp) {
kprintf("#%d\t%8x - %8x: size=%6x, flags=", cnt, tmp->start, tmp->end, tmp->end - tmp->start);
if (tmp->type & VMA_READ)
kputs("r");
else
kputs("-");
if (tmp->type & VMA_WRITE)
kputs("w");
else
kputs("-");
if (tmp->type & VMA_EXECUTE)
kputs("x");
else
kputs("-");
kputs("\n");
tmp = tmp->next;
cnt++;
void print_vma(vma_t *vma) {
while (vma) {
kprintf("0x%lx - 0x%lx: size=%x, flags=%c%c%c\n", vma->start, vma->end, vma->end - vma->start,
(vma->flags & VMA_READ) ? 'r' : '-',
(vma->flags & VMA_WRITE) ? 'w' : '-',
(vma->flags & VMA_EXECUTE) ? 'x' : '-');
vma = vma->prev;
}
}
spinlock_unlock(&task->vma_lock);
task_t* task = per_core(current_task);
return 0;
kputs("Kernelspace VMAs:\n");
spinlock_lock(&vma_lock);
print_vma(vma_list);
spinlock_unlock(&vma_lock);
kputs("Userspace VMAs:\n");
spinlock_lock(&task->vma_lock);
print_vma(task->vma_list);
spinlock_unlock(&task->vma_lock);
}
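For reference, a sketch of the output format produced above (the addresses are invented placeholders, not values from a real run):

/* Kernelspace VMAs:
 * 0x100000 - 0x14d000: size=4d000, flags=rwx
 * 0xff000 - 0x100000: size=1000, flags=rw-
 * Userspace VMAs:
 * 0x100000000000 - 0x10000000a000: size=a000, flags=rw-
 */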