use irqsave page directory lock to avoid deadlocks
This commit is contained in:
parent 7aea508931
commit e1c3ddbc8d
3 changed files with 48 additions and 53 deletions
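For context, the deadlock being avoided is the one the deleted comment in map_region() spells out: an IRQ handler may itself call map_region(), so a handler firing on a core that already holds pgd_lock would spin on that lock forever. The old code guarded against this with explicit irq_nested_disable()/irq_nested_enable() calls around a plain spinlock; this commit switches pgd_lock to spinlock_irqsave_t, whose lock/unlock routines manage the interrupt state themselves, and funnels the error paths of map_region() through a single out: label so the lock is released in exactly one place. A minimal sketch of what such an irqsave lock boils down to is shown below; the actual implementation is not part of this diff, so the struct layout and signatures are assumptions. Only the names spinlock_irqsave_t, spinlock_irqsave_lock() and spinlock_irqsave_unlock() appear in the diff, and irq_nested_disable()/irq_nested_enable() are the helpers the old code already used explicitly.

/*
 * Sketch only -- not the kernel's actual implementation.
 * An irqsave spinlock masks interrupts before spinning, so an IRQ handler
 * that also needs the page-directory lock can never preempt the lock
 * holder on the same core and deadlock on it.
 */
typedef struct {
	spinlock_t lock;   /* ordinary spinlock */
	uint32_t flags;    /* interrupt state saved by the current holder */
} spinlock_irqsave_t;

static inline void spinlock_irqsave_lock(spinlock_irqsave_t* s)
{
	/* save and mask interrupts first, then take the plain lock */
	uint32_t flags = irq_nested_disable();

	spinlock_lock(&s->lock);
	s->flags = flags;
}

static inline void spinlock_irqsave_unlock(spinlock_irqsave_t* s)
{
	uint32_t flags = s->flags;

	spinlock_unlock(&s->lock);
	irq_nested_enable(flags);  /* restore the previous interrupt state */
}

Kernel-space mappings keep using the plain kslock, which is why map_region(), vm_alloc(), unmap_region() and vm_free() now re-check the flags or address at the unlock site instead of caching a spinlock_t* pointer as before.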
@@ -172,7 +172,7 @@ int create_pgd(task_t* task, int copy)
 	task->pgd = pgd;
 
 	if (copy) {
-		spinlock_lock(&curr_task->pgd_lock);
+		spinlock_irqsave_lock(&curr_task->pgd_lock);
 
 		for (i=KERNEL_SPACE/(1024*PAGE_SIZE); i<1024; i++) {
 			if (!(curr_task->pgd->entries[i]))
@@ -187,7 +187,7 @@ int create_pgd(task_t* task, int copy)
 			}
 		}
 
-		spinlock_unlock(&curr_task->pgd_lock);
+		spinlock_irqsave_unlock(&curr_task->pgd_lock);
 	}
 
 	return counter;
@@ -206,7 +206,7 @@ int drop_pgd(void)
 	if (BUILTIN_EXPECT(pgd == &boot_pgd, 0))
 		return -EINVAL;
 
-	spinlock_lock(&task->pgd_lock);
+	spinlock_irqsave_lock(&task->pgd_lock);
 
 	for(i=0; i<PGT_ENTRIES; i++) {
 		if (pgd->entries[i] & PG_USER) {
@@ -220,7 +220,7 @@ int drop_pgd(void)
 
 	task->pgd = NULL;
 
-	spinlock_unlock(&task->pgd_lock);
+	spinlock_irqsave_unlock(&task->pgd_lock);
 
 	return 0;
 }
@@ -238,7 +238,7 @@ size_t virt_to_phys(size_t viraddr)
 	if (BUILTIN_EXPECT(!task || !task->pgd, 0))
 		return 0;
 
-	spinlock_lock(&task->pgd_lock);
+	spinlock_irqsave_lock(&task->pgd_lock);
 
 	index1 = viraddr >> 22;
 	index2 = (viraddr >> 12) & 0x3FF;
@@ -255,7 +255,7 @@ size_t virt_to_phys(size_t viraddr)
 out:
 	//kprintf("vir %p to phy %p\n", viraddr, ret);
 
-	spinlock_unlock(&task->pgd_lock);
+	spinlock_irqsave_unlock(&task->pgd_lock);
 
 	return ret;
 }
@@ -263,11 +263,9 @@ out:
 size_t map_region(size_t viraddr, size_t phyaddr, uint32_t npages, uint32_t flags)
 {
 	task_t* task = per_core(current_task);
-	spinlock_t* pgd_lock;
 	page_table_t* pgt;
 	size_t index, i;
 	size_t ret;
-	uint32_t irqflags;
 
 	if (BUILTIN_EXPECT(!task || !task->pgd, 0))
 		return 0;
@@ -276,22 +274,16 @@ size_t map_region(size_t viraddr, size_t phyaddr, uint32_t npages, uint32_t flags)
 		return 0;
 
 	if (flags & MAP_KERNEL_SPACE)
-		pgd_lock = &kslock;
+		spinlock_lock(&kslock);
 	else
-		pgd_lock = &task->pgd_lock;
-
-	// avoid interrupts because the IRQ handler is able to call map_region
-	irqflags = irq_nested_disable();
-
-	spinlock_lock(pgd_lock);
+		spinlock_irqsave_lock(&task->pgd_lock);
 
 	if (!viraddr) {
 		viraddr = vm_alloc(npages, flags);
 		if (BUILTIN_EXPECT(!viraddr, 0)) {
-			spinlock_unlock(pgd_lock);
-			irq_nested_enable(irqflags);
 			kputs("map_adress: found no valid virtual address\n");
-			return 0;
+			ret = 0;
+			goto out;
 		}
 	}
 
@@ -305,10 +297,9 @@ size_t map_region(size_t viraddr, size_t phyaddr, uint32_t npages, uint32_t flags)
 
 		pgt = (page_table_t*) get_pages(1);
 		if (BUILTIN_EXPECT(!pgt, 0)) {
-			spinlock_unlock(pgd_lock);
-			irq_nested_enable(irqflags);
 			kputs("map_address: out of memory\n");
-			return 0;
+			ret = 0;
+			goto out;
 		}
 
 		// set the new page table into the directory
@@ -326,10 +317,9 @@ size_t map_region(size_t viraddr, size_t phyaddr, uint32_t npages, uint32_t flags)
 		pgt_container = (page_table_t*) (task->pgd->entries[(KERNEL_SPACE - PAGE_SIZE) >> 22] & PAGE_MASK);
 
 		if (BUILTIN_EXPECT(!pgt_container, 0)) {
-			spinlock_unlock(pgd_lock);
-			irq_nested_enable(irqflags);
 			kputs("map_address: internal error\n");
-			return 0;
+			ret = 0;
+			goto out;
 		}
 
 		// map the new table into the address space of the kernel space
@@ -348,10 +338,9 @@ size_t map_region(size_t viraddr, size_t phyaddr, uint32_t npages, uint32_t flags)
 
 		index = (viraddr >> 12) & 0x3FF;
 		if (pgt->entries[index] && !(flags & MAP_REMAP)) {
-			spinlock_unlock(pgd_lock);
-			irq_nested_enable(irqflags);
 			kprintf("0x%x is already mapped\n", viraddr);
-			return 0;
+			ret = 0;
+			goto out;
 		}
 
 		if (flags & MAP_USER_SPACE)
@@ -389,8 +378,11 @@ size_t map_region(size_t viraddr, size_t phyaddr, uint32_t npages, uint32_t flags)
 		tlb_flush_one_page(viraddr);
 	}
 
-	spinlock_unlock(pgd_lock);
-	irq_nested_enable(irqflags);
+out:
+	if (flags & MAP_KERNEL_SPACE)
+		spinlock_unlock(&kslock);
+	else
+		spinlock_irqsave_unlock(&task->pgd_lock);
 
 	return ret;
 }
@@ -411,7 +403,7 @@ int change_page_permissions(size_t start, size_t end, uint32_t flags)
 	if (BUILTIN_EXPECT(!pgd, 0))
 		return -EINVAL;
 
-	spinlock_lock(&task->pgd_lock);
+	spinlock_irqsave_lock(&task->pgd_lock);
 
 	while (viraddr < end)
 	{
@@ -456,7 +448,7 @@ int change_page_permissions(size_t start, size_t end, uint32_t flags)
 		}
 	}
 
-	spinlock_unlock(&task->pgd_lock);
+	spinlock_irqsave_unlock(&task->pgd_lock);
 
 	return 0;
 }
@@ -469,7 +461,6 @@ int change_page_permissions(size_t start, size_t end, uint32_t flags)
 size_t vm_alloc(uint32_t npages, uint32_t flags)
 {
 	task_t* task = per_core(current_task);
-	spinlock_t* pgd_lock;
 	uint32_t index1, index2, j;
 	size_t viraddr, i, ret = 0;
 	size_t start, end;
@@ -479,11 +470,9 @@ size_t vm_alloc(uint32_t npages, uint32_t flags)
 		return 0;
 
 	if (flags & MAP_KERNEL_SPACE) {
-		pgd_lock = &kslock;
 		start = (((size_t) &kernel_end) + PAGE_SIZE) & PAGE_MASK;
 		end = (KERNEL_SPACE - 2*PAGE_SIZE) & PAGE_MASK; // we need 1 PAGE for our PGTs
 	} else {
-		pgd_lock = &task->pgd_lock;
 		start = KERNEL_SPACE & PAGE_MASK;
 		end = PAGE_MASK;
 	}
@@ -491,7 +480,10 @@ size_t vm_alloc(uint32_t npages, uint32_t flags)
 	if (BUILTIN_EXPECT(!npages, 0))
 		return 0;
 
-	spinlock_lock(pgd_lock);
+	if (flags & MAP_KERNEL_SPACE)
+		spinlock_lock(&kslock);
+	else
+		spinlock_irqsave_lock(&task->pgd_lock);
 
 	viraddr = i = start;
 	j = 0;
@@ -514,7 +506,10 @@ size_t vm_alloc(uint32_t npages, uint32_t flags)
 	if ((j >= npages) && (viraddr < end))
 		ret = viraddr;
 
-	spinlock_unlock(pgd_lock);
+	if (flags & MAP_KERNEL_SPACE)
+		spinlock_unlock(&kslock);
+	else
+		spinlock_irqsave_unlock(&task->pgd_lock);
 
 	return ret;
 }
@@ -522,7 +517,6 @@ size_t vm_alloc(uint32_t npages, uint32_t flags)
 int unmap_region(size_t viraddr, uint32_t npages)
 {
 	task_t* task = per_core(current_task);
-	spinlock_t* pgd_lock;
 	uint32_t i;
 	uint32_t index1, index2;
 	page_table_t* pgt;
@@ -531,11 +525,9 @@ int unmap_region(size_t viraddr, uint32_t npages)
 		return -EINVAL;
 
 	if (viraddr <= KERNEL_SPACE)
-		pgd_lock = &kslock;
+		spinlock_lock(&kslock);
 	else
-		pgd_lock = &task->pgd_lock;
-
-	spinlock_lock(pgd_lock);
+		spinlock_irqsave_lock(&task->pgd_lock);
 
 	for(i=0; i<npages; i++, viraddr+=PAGE_SIZE)
 	{
@@ -553,7 +545,10 @@ int unmap_region(size_t viraddr, uint32_t npages)
 		tlb_flush_one_page(viraddr);
 	}
 
-	spinlock_unlock(pgd_lock);
+	if (viraddr <= KERNEL_SPACE)
+		spinlock_unlock(&kslock);
+	else
+		spinlock_irqsave_unlock(&task->pgd_lock);
 
 	return 0;
 }
@@ -561,7 +556,6 @@ int unmap_region(size_t viraddr, uint32_t npages)
 int vm_free(size_t viraddr, uint32_t npages)
 {
 	task_t* task = per_core(current_task);
-	spinlock_t* pgd_lock;
 	uint32_t i;
 	uint32_t index1, index2;
 	page_table_t* pgt;
@@ -570,11 +564,9 @@ int vm_free(size_t viraddr, uint32_t npages)
 		return -EINVAL;
 
 	if (viraddr <= KERNEL_SPACE)
-		pgd_lock = &kslock;
+		spinlock_lock(&kslock);
 	else
-		pgd_lock = &task->pgd_lock;
-
-	spinlock_lock(pgd_lock);
+		spinlock_irqsave_lock(&task->pgd_lock);
 
 	for(i=0; i<npages; i++, viraddr+=PAGE_SIZE)
 	{
@@ -589,7 +581,10 @@ int vm_free(size_t viraddr, uint32_t npages)
 		tlb_flush_one_page(viraddr);
 	}
 
-	spinlock_unlock(pgd_lock);
+	if (viraddr <= KERNEL_SPACE)
+		spinlock_unlock(&kslock);
+	else
+		spinlock_irqsave_unlock(&task->pgd_lock);
 
 	return 0;
 }
@@ -607,7 +602,7 @@ int print_paging_tree(size_t viraddr)
 	index1 = viraddr >> 22;
 	index2 = (viraddr >> 12) & 0x3FF;
 
-	spinlock_lock(&task->pgd_lock);
+	spinlock_irqsave_lock(&task->pgd_lock);
 
 	kprintf("Paging dump of address 0x%x\n", viraddr);
 	pgd = task->pgd;
@@ -628,7 +623,7 @@ int print_paging_tree(size_t viraddr)
 	else
 		kputs("invalid page table\n");
 
-	spinlock_unlock(&task->pgd_lock);
+	spinlock_irqsave_unlock(&task->pgd_lock);
 
 	return 0;
 }

@@ -89,7 +89,7 @@ typedef struct task {
 	/// usage in number of pages
 	atomic_int32_t user_usage;
 	/// avoids concurrent access to the page directory
-	spinlock_t pgd_lock;
+	spinlock_irqsave_t pgd_lock;
 	/// pointer to the page directory
 	struct page_dir* pgd;
 	/// lock for the VMA_list

@@ -47,8 +47,8 @@
  * A task's id will be its position in this array.
  */
 static task_t task_table[MAX_TASKS] = { \
-		[0] = {0, TASK_IDLE, NULL, NULL, 0, 0, 0, NULL, NULL, 0, ATOMIC_INIT(0), SPINLOCK_INIT, NULL, SPINLOCK_INIT, NULL, NULL, 0, 0, 0, 0}, \
-		[1 ... MAX_TASKS-1] = {0, TASK_INVALID, NULL, NULL, 0, 0, 0, NULL, NULL, 0, ATOMIC_INIT(0), SPINLOCK_INIT, NULL, SPINLOCK_INIT, NULL, NULL, 0, 0, 0, 0}};
+		[0] = {0, TASK_IDLE, NULL, NULL, 0, 0, 0, NULL, NULL, 0, ATOMIC_INIT(0), SPINLOCK_IRQSAVE_INIT, NULL, SPINLOCK_INIT, NULL, NULL, 0, 0, 0, 0}, \
+		[1 ... MAX_TASKS-1] = {0, TASK_INVALID, NULL, NULL, 0, 0, 0, NULL, NULL, 0, ATOMIC_INIT(0), SPINLOCK_IRQSAVE_INIT, NULL, SPINLOCK_INIT, NULL, NULL, 0, 0, 0, 0}};
 static spinlock_irqsave_t table_lock = SPINLOCK_IRQSAVE_INIT;
 #ifndef CONFIG_TICKLESS
 #if MAX_CORES > 1