From ec171dfcce2d0828f60d73be285c23dfac298b6f Mon Sep 17 00:00:00 2001
From: Steffen Vogel
Date: Thu, 14 Nov 2013 13:12:35 +0100
Subject: [PATCH] implemented map_region() (more testing needed; will probably
 be replaced by an iterative solution)

---
 arch/x86/mm/page64.c | 103 ++++++++++++++++++++-----------------------
 1 file changed, 49 insertions(+), 54 deletions(-)

diff --git a/arch/x86/mm/page64.c b/arch/x86/mm/page64.c
index 2d9b1889..5da16793 100644
--- a/arch/x86/mm/page64.c
+++ b/arch/x86/mm/page64.c
@@ -283,21 +283,14 @@ size_t virt_to_phys(size_t viraddr)
 size_t map_region(size_t viraddr, size_t phyaddr, uint32_t npages, uint32_t flags)
 {
 	task_t* task = per_core(current_task);
-	page_map_t* pdpt, * pgd, * pgt;
-	uint16_t index_pml4, index_pdpt;
-	uint16_t index_pgd, index_pgt;
 	size_t i, ret;
 
 	if (BUILTIN_EXPECT(!task || !task->page_map, 0))
 		return 0;
 
-	if (flags & MAP_KERNEL_SPACE)
-		spinlock_lock(&kslock);
-	else
-		spinlock_irqsave_lock(&task->page_lock);
-
 	if (!viraddr) {
-		viraddr = vm_alloc(npages, flags);
+		kputs("map_region: deprecated vma_alloc() call from within map_region\n");
+		viraddr = vma_alloc(npages*PAGE_SIZE, VMA_HEAP);
 		if (BUILTIN_EXPECT(!viraddr, 0)) {
 			kputs("map_region: found no valid virtual address\n");
 			ret = 0;
@@ -305,59 +298,40 @@ size_t map_region(size_t viraddr, size_t phyaddr, uint32_t npages, uint32_t flag
 		}
 	}
 
+	// correct alignment
+	phyaddr &= PAGE_MASK;
+	viraddr &= PAGE_MASK;
 	ret = viraddr;
+
+	if (flags & MAP_KERNEL_SPACE)
+		spinlock_lock(&kslock);
+	else
+		spinlock_irqsave_lock(&task->page_lock);
+
+	kprintf("map_region: map %u pages from 0x%lx to 0x%lx with flags: 0x%x\n", npages, viraddr, phyaddr, flags);
 
 	for(i=0; i<npages; i++, viraddr+=PAGE_SIZE, phyaddr+=PAGE_SIZE) {
-		index_pml4 = (viraddr >> 39) & 0x1FF;
-		index_pdpt = (viraddr >> 30) & 0x1FF;
-		index_pgd = (viraddr >> 21) & 0x1FF;
-		index_pgt = (viraddr >> 12) & 0x1FF;
+		// page table entry
+		size_t* pte = (size_t *) (PAGE_PGT|(viraddr >> 9));
 
-		pdpt = (page_map_t*) (task->page_map->entries[index_pml4] & PAGE_MASK);
-		if (!pgt) {
-			kputs("map_region: out of memory\n");
-			ret = 0;
-			goto out;
-		}
-
-		pgd = (page_map_t*) (pdpt->entries[index_pdpt] & PAGE_MASK);
-		if (!pgd) {
-			kputs("map_region: out of memory\n");
-			ret = 0;
-			goto out;
-		}
-
-		pgt = (page_map_t*) (pgd->entries[index_pgd] & PAGE_MASK);
-		if (!pgt) {
-			kputs("map_region: out of memory\n");
-			ret = 0;
-			goto out;
-		}
-
-		/* convert physical address to virtual */
-		// Currently, we allocate pages only in kernel space.
-		// => physical address of the page table is identical of the virtual address
-		//if (paging_enabled)
-		//	pgt = (page_map_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index*PAGE_SIZE) & PAGE_MASK);
-
-		if (pgt->entries[index_pgt] && !(flags & MAP_REMAP)) {
-			kprintf("0x%x is already mapped\n", viraddr);
+		if (*pte && !(flags & MAP_REMAP)) {
+			kprintf("map_region: 0x%lx is already mapped\n", viraddr);
 			ret = 0;
 			goto out;
 		}
 
 		if (flags & MAP_USER_SPACE)
-			pgt->entries[index_pgt] = USER_PAGE|(phyaddr & PAGE_MASK);
+			*pte = phyaddr|USER_PAGE;
 		else
-			pgt->entries[index_pgt] = KERN_PAGE|(phyaddr & PAGE_MASK);
+			*pte = phyaddr|KERN_PAGE;
 
 		if (flags & MAP_NO_CACHE)
-			pgt->entries[index_pgt] |= PG_PCD;
+			*pte |= PG_PCD;
 
 		if (flags & MAP_NO_ACCESS)
-			pgt->entries[index_pgt] &= ~PG_PRESENT;
+			*pte &= ~PG_PRESENT;
 
 		if (flags & MAP_WT)
-			pgt->entries[index_pgt] |= PG_PWT;
+			*pte |= PG_PWT;
 
 		if (flags & MAP_USER_SPACE)
 			atomic_int32_inc(&task->user_usage);
@@ -651,10 +625,8 @@ int vm_free(size_t viraddr, uint32_t npages)
 static void pagefault_handler(struct state *s)
 {
 	task_t* task = per_core(current_task);
-	//page_map_t* pgd = task->page_map;
-	//page_map_t* pgt = NULL;
 	size_t viraddr = read_cr2();
-	//size_t phyaddr;
+	size_t phyaddr;
 
 #if 0
 	if ((viraddr >= task->start_heap) && (viraddr <= task->end_heap) && (viraddr > KERNEL_SPACE)) {
@@ -662,24 +634,47 @@ static void pagefault_handler(struct state *s)
 		phyaddr = get_page();
 		if (BUILTIN_EXPECT(!phyaddr, 0))
-			goto default_handler;
+			goto oom;
 
 		if (map_region(viraddr, phyaddr, 1, MAP_USER_SPACE) == viraddr) {
 			memset((void*) viraddr, 0x00, PAGE_SIZE);
 			return;
 		}
-
+		kprintf("Could not map 0x%x at 0x%x\n", phyaddr, viraddr);
 		put_page(phyaddr);
 	}
+	// handle missing paging structures for userspace
+	// all kernel space paging structures have been initialized in entry64.asm
+	else if (viraddr >= PAGE_PGT) {
+		kprintf("map_region: missing paging structure at: 0x%lx (%s)\n", viraddr, map_to_lvlname(viraddr));
+
+		phyaddr = get_page();
+		if (BUILTIN_EXPECT(!phyaddr, 0))
+			goto oom;
+
+		// TODO: initialize with zeros
+		// TODO: check that we are in userspace
+
+		// get pointer to parent page level entry
+		size_t *entry = (size_t *) ((int64_t) viraddr >> 9 & ~0x07);
+
+		// update entry
+		*entry = phyaddr|USER_TABLE;
+
+		return;
+	}
 #endif
 
-//default_handler:
 	kprintf("PAGE FAULT: Task %u got page fault at %p (irq %llu, cs:rip 0x%llx:0x%llx)\n", task->id, viraddr, s->int_no, s->cs, s->rip);
 	kprintf("Register state: rax = 0x%llx, rbx = 0x%llx, rcx = 0x%llx, rdx = 0x%llx, rdi = 0x%llx, rsi = 0x%llx, rbp = 0x%llx, rsp = 0x%llx\n", s->rax, s->rbx, s->rcx, s->rdx, s->rdi, s->rsi, s->rbp, s->rsp);
 
-	while(1);
+	irq_enable();
+	abort();
+
+oom:
+	kputs("map_region: out of memory\n");
+	irq_enable();
+	abort();
 }