use a spinlock for each PGD
also increases the usability of vm_alloc and vm_free
This commit is contained in:
parent
e157f93104
commit
17193475bb
4 changed files with 38 additions and 21 deletions
|
@ -51,7 +51,6 @@ extern const void kernel_end;
|
|||
|
||||
// boot task's page directory and page directory lock
|
||||
static page_dir_t boot_pgd = {{[0 ... 1023] = 0}};
|
||||
static spinlock_t boot_pgd_lock = SPINLOCK_INIT;
|
||||
static int paging_enabled = 0;
|
||||
|
||||
int get_boot_pgd(task_t* task)
|
||||
|
@ -60,7 +59,6 @@ int get_boot_pgd(task_t* task)
|
|||
return -EINVAL;
|
||||
|
||||
task->pgd = &boot_pgd;
|
||||
task->pgd_lock = &boot_pgd_lock;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -111,7 +109,6 @@ int create_pgd(task_t* task)
|
|||
pgt->entries[index2] = ((size_t) virt_to_phys((size_t) pgt) & 0xFFFFF000)|KERN_PAGE;
|
||||
|
||||
task->pgd = pgd;
|
||||
task->pgd_lock = &boot_pgd_lock;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -125,7 +122,7 @@ int drop_pgd(void)
|
|||
if (BUILTIN_EXPECT(pgd == &boot_pgd, 0))
|
||||
return -EINVAL;
|
||||
|
||||
spinlock_lock(per_core(current_task)->pgd_lock);
|
||||
spinlock_lock(&(per_core(current_task)->pgd_lock));
|
||||
|
||||
for(i=KERNEL_SPACE/(1024*PAGE_SIZE); i<1024; i++)
|
||||
if (pgd->entries[i] & 0xFFFFF000)
|
||||
|
@ -136,7 +133,7 @@ int drop_pgd(void)
|
|||
|
||||
per_core(current_task)->pgd = NULL;
|
||||
|
||||
spinlock_unlock(per_core(current_task)->pgd_lock);
|
||||
spinlock_unlock(&(per_core(current_task)->pgd_lock));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -154,6 +151,8 @@ size_t virt_to_phys(size_t viraddr)
|
|||
if (BUILTIN_EXPECT(!task || !task->pgd, 0))
|
||||
return 0;
|
||||
|
||||
spinlock_lock(&(per_core(current_task)->pgd_lock));
|
||||
|
||||
index1 = viraddr >> 22;
|
||||
index2 = (viraddr >> 12) & 0x3FF;
|
||||
|
||||
|
@ -169,6 +168,8 @@ size_t virt_to_phys(size_t viraddr)
|
|||
out:
|
||||
//kprintf("vir %p to phy %p\n", viraddr, ret);
|
||||
|
||||
spinlock_unlock(&(per_core(current_task)->pgd_lock));
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -185,9 +186,12 @@ size_t map_region(size_t viraddr, size_t phyaddr, uint32_t npages, uint32_t flag
|
|||
if (BUILTIN_EXPECT(!paging_enabled && (viraddr != phyaddr), 0))
|
||||
return 0;
|
||||
|
||||
spinlock_lock(&task->pgd_lock);
|
||||
|
||||
if (!viraddr) {
|
||||
viraddr = vm_alloc(npages, flags);
|
||||
if (BUILTIN_EXPECT(!viraddr, 0)) {
|
||||
spinlock_unlock(&task->pgd_lock);
|
||||
kputs("map_adress: found no valid virtual address\n");
|
||||
return 0;
|
||||
}
|
||||
|
@ -203,7 +207,7 @@ size_t map_region(size_t viraddr, size_t phyaddr, uint32_t npages, uint32_t flag
|
|||
|
||||
pgt = (page_table_t*) get_pages(1);
|
||||
if (BUILTIN_EXPECT(!pgt, 0)) {
|
||||
spinlock_unlock(task->pgd_lock);
|
||||
spinlock_unlock(&task->pgd_lock);
|
||||
kputs("map_address: out of memory\n");
|
||||
return 0;
|
||||
}
|
||||
|
@ -223,7 +227,7 @@ size_t map_region(size_t viraddr, size_t phyaddr, uint32_t npages, uint32_t flag
|
|||
pgt_container = (page_table_t*) (task->pgd->entries[(KERNEL_SPACE - PAGE_SIZE) >> 22] & 0xFFFFF000);
|
||||
|
||||
if (BUILTIN_EXPECT(!pgt_container, 0)) {
|
||||
spinlock_unlock(task->pgd_lock);
|
||||
spinlock_unlock(&task->pgd_lock);
|
||||
kputs("map_address: internal error\n");
|
||||
return 0;
|
||||
}
|
||||
|
@ -244,7 +248,7 @@ size_t map_region(size_t viraddr, size_t phyaddr, uint32_t npages, uint32_t flag
|
|||
|
||||
index = (viraddr >> 12) & 0x3FF;
|
||||
if (BUILTIN_EXPECT(pgt->entries[index], 0)) {
|
||||
spinlock_unlock(task->pgd_lock);
|
||||
spinlock_unlock(&task->pgd_lock);
|
||||
kprintf("0x%x is already maped\n");
|
||||
return 0;
|
||||
}
|
||||
|
@ -260,6 +264,8 @@ size_t map_region(size_t viraddr, size_t phyaddr, uint32_t npages, uint32_t flag
|
|||
tlb_flush_one_page(viraddr);
|
||||
}
|
||||
|
||||
spinlock_unlock(&task->pgd_lock);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -278,7 +284,7 @@ int change_page_permissions(size_t start, size_t end, uint32_t flags)
|
|||
if (BUILTIN_EXPECT(!pgd, 0))
|
||||
return -EINVAL;
|
||||
|
||||
spinlock_lock(per_core(current_task)->pgd_lock);
|
||||
spinlock_lock(&(per_core(current_task)->pgd_lock));
|
||||
|
||||
while (viraddr < end)
|
||||
{
|
||||
|
@ -307,7 +313,7 @@ int change_page_permissions(size_t start, size_t end, uint32_t flags)
|
|||
}
|
||||
}
|
||||
|
||||
spinlock_unlock(per_core(current_task)->pgd_lock);
|
||||
spinlock_unlock(&(per_core(current_task)->pgd_lock));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -321,13 +327,18 @@ size_t vm_alloc(uint32_t npages, uint32_t flags)
|
|||
{
|
||||
task_t* task = per_core(current_task);
|
||||
uint32_t index1, index2, j;
|
||||
size_t viraddr, i;
|
||||
size_t viraddr, i, ret = 0;
|
||||
size_t start, end;
|
||||
page_table_t* pgt;
|
||||
uint32_t has_lock;
|
||||
|
||||
if (BUILTIN_EXPECT(!task || !task->pgd || !paging_enabled, 0))
|
||||
return 0;
|
||||
|
||||
has_lock = spinlock_has_lock(&task->pgd_lock);
|
||||
if (!has_lock)
|
||||
spinlock_lock(&task->pgd_lock);
|
||||
|
||||
if (flags & MAP_KERNEL_SPACE) {
|
||||
start = (((size_t) &kernel_end) + PAGE_SIZE) & 0xFFFFF000;
|
||||
end = (KERNEL_SPACE - 2*PAGE_SIZE) & 0xFFFFF000; // we need 1 PAGE for our PGTs
|
||||
|
@ -358,21 +369,28 @@ size_t vm_alloc(uint32_t npages, uint32_t flags)
|
|||
} while((j < npages) && (i<=end));
|
||||
|
||||
if ((j >= npages) && (viraddr < end))
|
||||
return viraddr;
|
||||
ret = viraddr;
|
||||
|
||||
return 0;
|
||||
if (!has_lock)
|
||||
spinlock_unlock(&task->pgd_lock);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
int vm_free(size_t viraddr, uint32_t npages)
|
||||
{
|
||||
task_t* task = per_core(current_task);
|
||||
uint32_t i;
|
||||
uint32_t index1, index2;
|
||||
uint32_t index1, index2, has_lock;
|
||||
page_table_t* pgt;
|
||||
|
||||
if (BUILTIN_EXPECT(!task || !task->pgd || !paging_enabled, 0))
|
||||
return -EINVAL;
|
||||
|
||||
has_lock = spinlock_has_lock(&task->pgd_lock);
|
||||
if (!has_lock)
|
||||
spinlock_lock(&task->pgd_lock);
|
||||
|
||||
for(i=0; i<npages; i++, viraddr+=PAGE_SIZE)
|
||||
{
|
||||
index1 = viraddr >> 22;
|
||||
|
@ -384,6 +402,9 @@ int vm_free(size_t viraddr, uint32_t npages)
|
|||
pgt->entries[index2] = 0;
|
||||
}
|
||||
|
||||
if (!has_lock)
|
||||
spinlock_unlock(&task->pgd_lock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -44,7 +44,7 @@ typedef struct task {
|
|||
tid_t id; /* task id = position in the task table */
|
||||
uint32_t status;
|
||||
atomic_int32_t mem_usage; /* in number of pages */
|
||||
struct spinlock* pgd_lock; /* avoids concurrent access to the page directoriy */
|
||||
struct spinlock pgd_lock; /* avoids concurrent access to the page directoriy */
|
||||
struct page_dir* pgd; /* pointer to the page directory */
|
||||
spinlock_t vma_lock;
|
||||
vma_t* vma_list;
|
||||
|
|
|
@ -32,7 +32,8 @@
|
|||
#include <asm/elf.h>
|
||||
|
||||
DEFINE_PER_CORE(task_t*, current_task, NULL);
|
||||
static task_t task_table[MAX_TASKS] = {[0 ... MAX_TASKS-1] = {0, TASK_INVALID, ATOMIC_INIT(0), NULL, NULL, SPINLOCK_INIT, NULL}};
|
||||
static task_t task_table[MAX_TASKS] = {[0 ... MAX_TASKS-1] = {0, TASK_INVALID, ATOMIC_INIT(0), \
|
||||
SPINLOCK_INIT, NULL, SPINLOCK_INIT, NULL}};
|
||||
static spinlock_t table_lock = SPINLOCK_INIT;
|
||||
|
||||
/*
|
||||
|
|
|
@ -259,7 +259,6 @@ void* mem_allocation(size_t sz, uint32_t flags)
|
|||
{
|
||||
size_t phyaddr, viraddr;
|
||||
uint32_t npages = sz / PAGE_SIZE;
|
||||
task_t* task = per_core(current_task);
|
||||
|
||||
if (sz % PAGE_SIZE)
|
||||
npages++;
|
||||
|
@ -268,9 +267,7 @@ void* mem_allocation(size_t sz, uint32_t flags)
|
|||
if (BUILTIN_EXPECT(!phyaddr, 0))
|
||||
return 0;
|
||||
|
||||
spinlock_lock(task->pgd_lock);
|
||||
viraddr = map_region(0, phyaddr, npages, flags);
|
||||
spinlock_unlock(task->pgd_lock);
|
||||
|
||||
return (void*) viraddr;
|
||||
}
|
||||
|
@ -305,9 +302,7 @@ void kfree(void* addr, size_t sz)
|
|||
}
|
||||
spinlock_unlock(&bitmap_lock);
|
||||
|
||||
spinlock_lock(task->pgd_lock);
|
||||
vm_free((size_t) addr, npages);
|
||||
spinlock_unlock(task->pgd_lock);
|
||||
|
||||
atomic_int32_sub(&total_allocated_pages, npages);
|
||||
atomic_int32_add(&total_available_pages, npages);
|
||||
|
|
Loading…
Add table
Reference in a new issue