removed old vm_alloc()/vm_free(), which have been replaced by vma_alloc()/vma_free()

Steffen Vogel 2013-11-20 13:22:09 +01:00
parent 71f55f0a89
commit de33962e9d


@@ -405,91 +405,6 @@ int change_page_permissions(size_t start, size_t end, uint32_t flags)
    return -EINVAL;
}
/*
* Use the first fit algorithm to find a valid address range
*
* TODO: O(n) => bad performance, we need a better approach
*/
size_t vm_alloc(uint32_t npages, uint32_t flags)
{
    task_t* task = per_core(current_task);
    size_t viraddr, i, j, ret = 0;
    size_t start, end;
    page_map_t* pdpt, * pgd, * pgt;
    uint16_t index_pml4, index_pdpt;
    uint16_t index_pgd, index_pgt;

    if (BUILTIN_EXPECT(!task || !task->page_map || !paging_enabled, 0))
        return 0;

    if (flags & MAP_KERNEL_SPACE) {
        start = (((size_t) &kernel_end) + 10*PAGE_SIZE) & PAGE_MASK;
        end = (KERNEL_SPACE - PAGE_SIZE) & PAGE_MASK;
    } else {
        start = KERNEL_SPACE & PAGE_MASK;
        end = PAGE_MASK;
    }

    if (BUILTIN_EXPECT(!npages, 0))
        return 0;

    if (flags & MAP_KERNEL_SPACE)
        spinlock_lock(&kslock);
    else
        spinlock_irqsave_lock(&task->page_lock);

    viraddr = i = start;
    j = 0;
    do {
        index_pml4 = (viraddr >> 39) & 0x1FF;
        index_pdpt = (viraddr >> 30) & 0x1FF;
        index_pgd = (viraddr >> 21) & 0x1FF;
        index_pgt = (viraddr >> 12) & 0x1FF;

        // Currently, we allocate pages only in kernel space.
        // => the physical address of the page table is identical to the virtual address
        pdpt = (page_map_t*) (task->page_map->entries[index_pml4] & PAGE_MASK);
        if (!pdpt) {
            // no PDPT => every page covered by this PML4 entry is free
            i += (size_t)PAGE_MAP_ENTRIES*PAGE_MAP_ENTRIES*PAGE_MAP_ENTRIES*PAGE_SIZE;
            j += PAGE_MAP_ENTRIES*PAGE_MAP_ENTRIES*PAGE_MAP_ENTRIES;
            continue;
        }

        pgd = (page_map_t*) (pdpt->entries[index_pdpt] & PAGE_MASK);
        if (!pgd) {
            // no PGD => every page covered by this PDPT entry is free
            i += PAGE_MAP_ENTRIES*PAGE_MAP_ENTRIES*PAGE_SIZE;
            j += PAGE_MAP_ENTRIES*PAGE_MAP_ENTRIES;
            continue;
        }

        pgt = (page_map_t*) (pgd->entries[index_pgd] & PAGE_MASK);
        if (!pgt) {
            // no PGT => every page covered by this PGD entry is free
            i += PAGE_MAP_ENTRIES*PAGE_SIZE;
            j += PAGE_MAP_ENTRIES;
            continue;
        }

        if (!(pgt->entries[index_pgt])) {
            // page is free => extend the current candidate range
            i += PAGE_SIZE;
            j++;
        } else {
            // page is in use => restart the search behind it
            j = 0;
            viraddr = i + PAGE_SIZE;
            i = i + PAGE_SIZE;
        }
    } while((j < npages) && (i <= end));

    if ((j >= npages) && (viraddr < end))
        ret = viraddr;

    if (flags & MAP_KERNEL_SPACE)
        spinlock_unlock(&kslock);
    else
        spinlock_irqsave_unlock(&task->page_lock);

    return ret;
}
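For illustration: the scan above is a plain first-fit search, as the TODO comment notes. The cursor i advances page by page, j counts consecutive free pages, and any mapped page restarts the candidate window behind it. A minimal standalone model of that behavior over a free/used bitmap (the first_fit() helper below is hypothetical and not part of this codebase):

    #include <stdbool.h>
    #include <stddef.h>

    /* Simplified model of the removed first-fit scan: find `npages`
     * consecutive free slots in used[], restarting the window whenever
     * a used slot is hit. Like vm_alloc(), this is O(n) in the number
     * of slots scanned, which is what the TODO above complains about. */
    static size_t first_fit(const bool *used, size_t total, size_t npages)
    {
        size_t start = 0, count = 0;

        for (size_t i = 0; i < total && count < npages; i++) {
            if (!used[i]) {
                count++;        /* extend the current free window */
            } else {
                count = 0;      /* collision: restart behind it */
                start = i + 1;
            }
        }

        return (count >= npages) ? start : (size_t) -1;
    }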
int unmap_region(size_t viraddr, uint32_t npages)
{
@@ -558,70 +473,6 @@ int unmap_region(size_t viraddr, uint32_t npages)
    return 0;
}
int vm_free(size_t viraddr, uint32_t npages)
{
    task_t* task = per_core(current_task);
    page_map_t* pdpt, * pgd, * pgt;
    size_t i;
    uint16_t index_pml4, index_pdpt;
    uint16_t index_pgd, index_pgt;

    if (BUILTIN_EXPECT(!task || !task->page_map || !paging_enabled, 0))
        return -EINVAL;

    if (viraddr <= KERNEL_SPACE)
        spinlock_lock(&kslock);
    else
        spinlock_irqsave_lock(&task->page_lock);

    i = 0;
    while(i < npages)
    {
        index_pml4 = (viraddr >> 39) & 0x1FF;
        index_pdpt = (viraddr >> 30) & 0x1FF;
        index_pgd = (viraddr >> 21) & 0x1FF;
        index_pgt = (viraddr >> 12) & 0x1FF;

        // Currently, we allocate pages only in kernel space.
        // => the physical address of the page table is identical to the virtual address
        pdpt = (page_map_t*) (task->page_map->entries[index_pml4] & PAGE_MASK);
        if (!pdpt) {
            // no PDPT => nothing to unmap below this PML4 entry
            viraddr += (size_t) PAGE_MAP_ENTRIES*PAGE_MAP_ENTRIES*PAGE_MAP_ENTRIES*PAGE_SIZE;
            i += PAGE_MAP_ENTRIES*PAGE_MAP_ENTRIES*PAGE_MAP_ENTRIES;
            continue;
        }

        pgd = (page_map_t*) (pdpt->entries[index_pdpt] & PAGE_MASK);
        if (!pgd) {
            // no PGD => nothing to unmap below this PDPT entry
            viraddr += PAGE_MAP_ENTRIES*PAGE_MAP_ENTRIES*PAGE_SIZE;
            i += PAGE_MAP_ENTRIES*PAGE_MAP_ENTRIES;
            continue;
        }

        pgt = (page_map_t*) (pgd->entries[index_pgd] & PAGE_MASK);
        if (!pgt) {
            // no PGT => nothing to unmap below this PGD entry
            viraddr += PAGE_MAP_ENTRIES*PAGE_SIZE;
            i += PAGE_MAP_ENTRIES;
            continue;
        }

        if (pgt->entries[index_pgt])
            pgt->entries[index_pgt] = 0;

        // invalidate the TLB entry of the page just unmapped before advancing
        tlb_flush_one_page(viraddr);
        viraddr += PAGE_SIZE;
        i++;
    }

    if (viraddr <= KERNEL_SPACE)
        spinlock_unlock(&kslock);
    else
        spinlock_irqsave_unlock(&task->page_lock);

    return 0;
}
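Both removed functions derive their four table indices with the same shift-and-mask pattern, which follows the standard x86-64 4-level paging layout: each level indexes 512 (2^9) entries, consuming 9 bits of the virtual address starting at bit 12. A self-contained sketch of that arithmetic (the dump_indices() helper is illustrative only, not part of this repository):

    #include <stdint.h>
    #include <stdio.h>

    /* Recover the four page-map indices of a virtual address,
     * mirroring the shifts used in vm_alloc()/vm_free() above. */
    static void dump_indices(uint64_t viraddr)
    {
        uint16_t pml4 = (viraddr >> 39) & 0x1FF;  /* bits 47..39 */
        uint16_t pdpt = (viraddr >> 30) & 0x1FF;  /* bits 38..30 */
        uint16_t pgd  = (viraddr >> 21) & 0x1FF;  /* bits 29..21 */
        uint16_t pgt  = (viraddr >> 12) & 0x1FF;  /* bits 20..12 */

        printf("%#llx -> pml4=%u pdpt=%u pgd=%u pgt=%u\n",
               (unsigned long long) viraddr, (unsigned) pml4,
               (unsigned) pdpt, (unsigned) pgd, (unsigned) pgt);
    }

    int main(void)
    {
        dump_indices(0x00007f1234567000ULL);  /* an arbitrary example address */
        return 0;
    }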
static void pagefault_handler(struct state *s)
{
    task_t* task = per_core(current_task);