diff --git a/arch/x86/mm/page64.c b/arch/x86/mm/page64.c
index 884158cc..1bb6cea5 100644
--- a/arch/x86/mm/page64.c
+++ b/arch/x86/mm/page64.c
@@ -128,7 +128,7 @@ size_t virt_to_phys(size_t viraddr)
 	if (BUILTIN_EXPECT(!task || !task->pgd, 0))
 		return 0;
 
-	spinlock_lock(&task->pgd_lock);
+	spinlock_irqsave_lock(&task->pgd_lock);
 
 	// Currently, we allocate pages only in kernel space.
 	// => physical address of the page table is identical of the virtual address
@@ -152,7 +152,7 @@ size_t virt_to_phys(size_t viraddr)
 
 out:
 	//kprintf("vir %p to phy %p\n", viraddr, ret);
-	spinlock_unlock(&task->pgd_lock);
+	spinlock_irqsave_unlock(&task->pgd_lock);
 
 	return ret;
 }
@@ -160,10 +160,8 @@ out:
 size_t map_region(size_t viraddr, size_t phyaddr, uint32_t npages, uint32_t flags)
 {
 	task_t* task = per_core(current_task);
-	spinlock_t* pgd_lock;
 	page_table_t* pgt;
 	size_t i, ret;
-	uint32_t irqflags;
 
 	if (BUILTIN_EXPECT(!task || !task->pgd, 0))
 		return 0;
@@ -172,20 +170,16 @@ size_t map_region(size_t viraddr, size_t phyaddr, uint32_t npages, uint32_t flag
 		return 0;
 
 	if (flags & MAP_KERNEL_SPACE)
-		pgd_lock = &kslock;
+		spinlock_lock(&kslock);
 	else
-		pgd_lock = &task->pgd_lock;
-
-	// avoid interrupts because the IRQ handler is able to call map_region
-	irqflags = irq_nested_disable();
-	spinlock_lock(pgd_lock);
+		spinlock_irqsave_lock(&task->pgd_lock);
 
 	if (!viraddr) {
 		viraddr = vm_alloc(npages, flags);
 		if (BUILTIN_EXPECT(!viraddr, 0)) {
-			spinlock_unlock(pgd_lock);
 			kputs("map_region: found no valid virtual address\n");
-			return 0;
+			ret = 0;
+			goto out;
 		}
 	}
 
@@ -198,26 +192,23 @@ size_t map_region(size_t viraddr, size_t phyaddr, uint32_t npages, uint32_t flag
 
 		pgt = (page_table_t*) (task->pgd->entries[idx_pd4] & PAGE_MASK);
 		if (!pgt) {
-			spinlock_unlock(pgd_lock);
-			irq_nested_enable(irqflags);
 			kputs("map_region: out of memory\n");
-			return 0;
+			ret = 0;
+			goto out;
 		}
 
 		pgt = (page_table_t*) (pgt->entries[idx_dirp] & PAGE_MASK);
 		if (!pgt) {
-			spinlock_unlock(pgd_lock);
-			irq_nested_enable(irqflags);
 			kputs("map_region: out of memory\n");
-			return 0;
+			ret = 0;
+			goto out;
 		}
 
 		pgt = (page_table_t*) (pgt->entries[idx_dir] & PAGE_MASK);
 		if (!pgt) {
-			spinlock_unlock(pgd_lock);
-			irq_nested_enable(irqflags);
 			kputs("map_region: out of memory\n");
-			return 0;
+			ret = 0;
+			goto out;
 		}
 
 		/* convert physical address to virtual */
@@ -227,10 +218,9 @@ size_t map_region(size_t viraddr, size_t phyaddr, uint32_t npages, uint32_t flag
 		//	pgt = (page_table_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index*PAGE_SIZE) & PAGE_MASK);
 
 		if (pgt->entries[idx_table] && !(flags & MAP_REMAP)) {
-			spinlock_unlock(pgd_lock);
-			irq_nested_enable(irqflags);
 			kprintf("0x%x is already mapped\n", viraddr);
-			return 0;
+			ret = 0;
+			goto out;
 		}
 
 		if (flags & MAP_USER_SPACE)
@@ -253,8 +243,11 @@ size_t map_region(size_t viraddr, size_t phyaddr, uint32_t npages, uint32_t flag
 		tlb_flush_one_page(viraddr);
 	}
 
-	spinlock_unlock(pgd_lock);
-	irq_nested_enable(irqflags);
+out:
+	if (flags & MAP_KERNEL_SPACE)
+		spinlock_unlock(&kslock);
+	else
+		spinlock_irqsave_unlock(&task->pgd_lock);
 
 	return ret;
 }
@@ -335,7 +328,6 @@ int change_page_permissions(size_t start, size_t end, uint32_t flags)
 size_t vm_alloc(uint32_t npages, uint32_t flags)
 {
 	task_t* task = per_core(current_task);
-	spinlock_t* pgd_lock;
 	size_t viraddr, i, j, ret = 0;
 	size_t start, end;
 	page_table_t* pgt;
@@ -344,11 +336,9 @@ size_t vm_alloc(uint32_t npages, uint32_t flags)
 		return 0;
 
 	if (flags & MAP_KERNEL_SPACE) {
-		pgd_lock = &kslock;
 		start = (((size_t) &kernel_end) + 10*PAGE_SIZE) & PAGE_MASK;
 		end = (KERNEL_SPACE - PAGE_SIZE) & PAGE_MASK;
 	} else {
-		pgd_lock = &task->pgd_lock;
 		start = KERNEL_SPACE & PAGE_MASK;
 		end = PAGE_MASK;
 	}
@@ -356,7 +346,10 @@ size_t vm_alloc(uint32_t npages, uint32_t flags)
 	if (BUILTIN_EXPECT(!npages, 0))
 		return 0;
 
-	spinlock_lock(pgd_lock);
+	if (flags & MAP_KERNEL_SPACE)
+		spinlock_lock(&kslock);
+	else
+		spinlock_irqsave_lock(&task->pgd_lock);
 
 	viraddr = i = start;
 	j = 0;
@@ -403,7 +396,10 @@ size_t vm_alloc(uint32_t npages, uint32_t flags)
 	if ((j >= npages) && (viraddr < end))
 		ret = viraddr;
 
-	spinlock_unlock(pgd_lock);
+	if (flags & MAP_KERNEL_SPACE)
+		spinlock_unlock(&kslock);
+	else
+		spinlock_irqsave_unlock(&task->pgd_lock);
 
 	return ret;
 }
@@ -411,7 +407,6 @@ size_t vm_alloc(uint32_t npages, uint32_t flags)
 int unmap_region(size_t viraddr, uint32_t npages)
 {
 	task_t* task = per_core(current_task);
-	spinlock_t* pgd_lock;
 	page_table_t* pgt;
 	size_t i;
 	uint16_t idx_pd4, idx_dirp;
@@ -421,11 +416,9 @@ int unmap_region(size_t viraddr, uint32_t npages)
 		return -EINVAL;
 
 	if (viraddr <= KERNEL_SPACE)
-		pgd_lock = &kslock;
+		spinlock_lock(&kslock);
	else
-		pgd_lock = &task->pgd_lock;
-
-	spinlock_lock(pgd_lock);
+		spinlock_irqsave_lock(&task->pgd_lock);
 
 	i = 0;
 	while(i<npages)
@@ ... @@ int unmap_region(size_t viraddr, uint32_t npages)
-	spinlock_unlock(pgd_lock);
+	if (viraddr <= KERNEL_SPACE)
+		spinlock_unlock(&kslock);
+	else
+		spinlock_irqsave_unlock(&task->pgd_lock);
 
 	return 0;
 }
@@ -478,7 +474,6 @@ int unmap_region(size_t viraddr, uint32_t npages)
 int vm_free(size_t viraddr, uint32_t npages)
 {
 	task_t* task = per_core(current_task);
-	spinlock_t* pgd_lock;
 	page_table_t* pgt;
 	size_t i;
 	uint16_t idx_pd4, idx_dirp;
@@ -488,11 +483,9 @@ int vm_free(size_t viraddr, uint32_t npages)
 		return -EINVAL;
 
 	if (viraddr <= KERNEL_SPACE)
-		pgd_lock = &kslock;
+		spinlock_lock(&kslock);
 	else
-		pgd_lock = &task->pgd_lock;
-
-	spinlock_lock(pgd_lock);
+		spinlock_irqsave_lock(&task->pgd_lock);
 
 	i = 0;
 	while(i<npages)
@@ ... @@ int vm_free(size_t viraddr, uint32_t npages)
-	spinlock_unlock(pgd_lock);
+	if (viraddr <= KERNEL_SPACE)
+		spinlock_unlock(&kslock);
+	else
+		spinlock_irqsave_unlock(&task->pgd_lock);
 
 	return 0;
 }
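
Note on the locking scheme: this patch folds the open-coded sequence
"irqflags = irq_nested_disable(); spinlock_lock(pgd_lock);" (and the matching
unlock/enable on every exit path) into the spinlock_irqsave_* variants plus a
single out: cleanup label. For readers unfamiliar with the irqsave variants,
the sketch below shows one plausible way to build them from the primitives
that already appear in this diff (spinlock_lock, spinlock_unlock,
irq_nested_disable, irq_nested_enable). The struct layout and signatures are
assumptions for illustration only, not the kernel's actual definitions.

	#include <stdint.h>	/* uint32_t; spinlock_t and the irq_nested_*
				 * helpers come from the kernel's own headers */

	/* Hypothetical layout: a plain spinlock plus the interrupt state
	 * saved by the current holder. */
	typedef struct spinlock_irqsave {
		spinlock_t	lock;
		uint32_t	flags;
	} spinlock_irqsave_t;

	static inline void spinlock_irqsave_lock(spinlock_irqsave_t* s)
	{
		/* Disable interrupts before spinning: an IRQ handler that
		 * also calls map_region() could otherwise preempt the holder
		 * on the same core and spin forever on the lock (the deadlock
		 * the removed irq_nested_disable()/irq_nested_enable() pair
		 * guarded against). */
		uint32_t flags = irq_nested_disable();

		spinlock_lock(&s->lock);
		s->flags = flags;	/* stash state; restored on unlock */
	}

	static inline void spinlock_irqsave_unlock(spinlock_irqsave_t* s)
	{
		uint32_t flags = s->flags;	/* read before releasing */

		spinlock_unlock(&s->lock);
		irq_nested_enable(flags);	/* restore saved IRQ state */
	}

Two consequences of the new scheme are worth noting. First, task->pgd_lock is
presumably redeclared with the irqsave lock type in a header not shown here,
since spinlock_irqsave_lock() now takes its address. Second, the global kslock
keeps the plain spinlock_lock/spinlock_unlock calls, so the kernel-space path
no longer runs with interrupts disabled, whereas the old code disabled
interrupts for both paths.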