introduce a special PGD lock for the kernel space
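
Kernel-space mappings are shared by every page directory, so guarding them with the
per-task pgd_lock cannot serialize two tasks that modify the same kernel page tables.
map_region, vm_alloc and vm_free therefore now select their lock at runtime: the new
global kslock for operations on the kernel space, the task's own pgd_lock otherwise.
create_pgd likewise holds kslock while it copies the boot page directory's
kernel-space entries into a new page directory.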

Stefan Lankes 2011-03-04 13:47:06 +01:00
parent 56ee331596
commit 08b0207dc9


@@ -51,7 +51,7 @@ extern const void kernel_end;
 // boot task's page directory and page directory lock
 static page_dir_t boot_pgd = {{[0 ... 1023] = 0}};
-//static spinlock_t boot_lock = SPINLOCK_INIT;
+static spinlock_t kslock = SPINLOCK_INIT;
 
 static int paging_enabled = 0;
 
 page_dir_t* get_boot_pgd(void)
@@ -142,12 +142,16 @@ int create_pgd(task_t* task, int copy)
 	memset(pgt, 0, sizeof(page_table_t));
 	counter++;
 
+	spinlock_lock(&kslock);
+
 	for(i=0; i<KERNEL_SPACE/(1024*PAGE_SIZE); i++) {
 		pgd->entries[i] = boot_pgd.entries[i];
 		if (pgd->entries[i])
 			pgt->entries[i] = pgt_container->entries[i];
 	}
 
+	spinlock_unlock(&kslock);
+
 	// map page table container at the end of the kernel space
 	viraddr = (KERNEL_SPACE - PAGE_SIZE) & 0xFFFFF000;
 	index1 = viraddr >> 22;
@@ -252,6 +256,7 @@ out:
 size_t map_region(size_t viraddr, size_t phyaddr, uint32_t npages, uint32_t flags)
 {
 	task_t* task = per_core(current_task);
+	spinlock_t* pgd_lock;
 	page_table_t* pgt;
 	size_t index, i;
 	size_t ret;
@@ -262,12 +267,17 @@ size_t map_region(size_t viraddr, size_t phyaddr, uint32_t npages, uint32_t flags)
 	if (BUILTIN_EXPECT(!paging_enabled && (viraddr != phyaddr), 0))
 		return 0;
 
-	spinlock_lock(&task->pgd_lock);
+	if (flags & MAP_KERNEL_SPACE)
+		pgd_lock = &kslock;
+	else
+		pgd_lock = &task->pgd_lock;
+
+	spinlock_lock(pgd_lock);
 
 	if (!viraddr) {
 		viraddr = vm_alloc(npages, flags);
 		if (BUILTIN_EXPECT(!viraddr, 0)) {
-			spinlock_unlock(&task->pgd_lock);
+			spinlock_unlock(pgd_lock);
 			kputs("map_address: found no valid virtual address\n");
 			return 0;
 		}
@@ -283,7 +293,7 @@ size_t map_region(size_t viraddr, size_t phyaddr, uint32_t npages, uint32_t flags)
 		pgt = (page_table_t*) get_pages(1);
 		if (BUILTIN_EXPECT(!pgt, 0)) {
-			spinlock_unlock(&task->pgd_lock);
+			spinlock_unlock(pgd_lock);
 			kputs("map_address: out of memory\n");
 			return 0;
 		}
 
@@ -303,7 +313,7 @@ size_t map_region(size_t viraddr, size_t phyaddr, uint32_t npages, uint32_t flags)
 		pgt_container = (page_table_t*) (task->pgd->entries[(KERNEL_SPACE - PAGE_SIZE) >> 22] & 0xFFFFF000);
 		if (BUILTIN_EXPECT(!pgt_container, 0)) {
-			spinlock_unlock(&task->pgd_lock);
+			spinlock_unlock(pgd_lock);
 			kputs("map_address: internal error\n");
 			return 0;
 		}
 
@@ -324,7 +334,7 @@ size_t map_region(size_t viraddr, size_t phyaddr, uint32_t npages, uint32_t flags)
 	index = (viraddr >> 12) & 0x3FF;
 	if (BUILTIN_EXPECT(pgt->entries[index], 0)) {
-		spinlock_unlock(&task->pgd_lock);
+		spinlock_unlock(pgd_lock);
 		kprintf("0x%x is already mapped\n", viraddr);
 		return 0;
 	}
 
@@ -340,7 +350,7 @@ size_t map_region(size_t viraddr, size_t phyaddr, uint32_t npages, uint32_t flags)
 		tlb_flush_one_page(viraddr);
 	}
 
-	spinlock_unlock(&task->pgd_lock);
+	spinlock_unlock(pgd_lock);
 
 	return ret;
 }
@@ -403,6 +413,7 @@ int change_page_permissions(size_t start, size_t end, uint32_t flags)
 size_t vm_alloc(uint32_t npages, uint32_t flags)
 {
 	task_t* task = per_core(current_task);
+	spinlock_t* pgd_lock;
 	uint32_t index1, index2, j;
 	size_t viraddr, i, ret = 0;
 	size_t start, end;
@@ -411,12 +422,12 @@ size_t vm_alloc(uint32_t npages, uint32_t flags)
 	if (BUILTIN_EXPECT(!task || !task->pgd || !paging_enabled, 0))
 		return 0;
 
-	spinlock_lock(&task->pgd_lock);
-
 	if (flags & MAP_KERNEL_SPACE) {
+		pgd_lock = &kslock;
 		start = (((size_t) &kernel_end) + PAGE_SIZE) & 0xFFFFF000;
 		end = (KERNEL_SPACE - 2*PAGE_SIZE) & 0xFFFFF000;	// we need 1 PAGE for our PGTs
 	} else {
+		pgd_lock = &task->pgd_lock;
 		start = KERNEL_SPACE & 0xFFFFF000;
 		end = 0xFFFFF000;
 	}
@@ -424,6 +435,8 @@ size_t vm_alloc(uint32_t npages, uint32_t flags)
 	if (BUILTIN_EXPECT(!npages, 0))
 		return 0;
 
+	spinlock_lock(pgd_lock);
+
 	viraddr = i = start;
 	j = 0;
 	do {
@@ -445,7 +458,7 @@ size_t vm_alloc(uint32_t npages, uint32_t flags)
 	if ((j >= npages) && (viraddr < end))
 		ret = viraddr;
 
-	spinlock_unlock(&task->pgd_lock);
+	spinlock_unlock(pgd_lock);
 
 	return ret;
 }
@@ -453,6 +466,7 @@ size_t vm_alloc(uint32_t npages, uint32_t flags)
 int vm_free(size_t viraddr, uint32_t npages)
 {
 	task_t* task = per_core(current_task);
+	spinlock_t* pgd_lock;
 	uint32_t i;
 	uint32_t index1, index2;
 	page_table_t* pgt;
@@ -460,7 +474,12 @@ int vm_free(size_t viraddr, uint32_t npages)
 	if (BUILTIN_EXPECT(!task || !task->pgd || !paging_enabled, 0))
 		return -EINVAL;
 
-	spinlock_lock(&task->pgd_lock);
+	if (viraddr <= KERNEL_SPACE)
+		pgd_lock = &kslock;
+	else
+		pgd_lock = &task->pgd_lock;
+
+	spinlock_lock(pgd_lock);
 
 	for(i=0; i<npages; i++, viraddr+=PAGE_SIZE)
 	{
@@ -473,7 +492,7 @@ int vm_free(size_t viraddr, uint32_t npages)
 		pgt->entries[index2] = 0;
 	}
 
-	spinlock_unlock(&task->pgd_lock);
+	spinlock_unlock(pgd_lock);
 
 	return 0;
 }