avoid deadlock by disabling interrupts in map_region

- the page_fault handler also uses map_region and therefore implicitly takes its spinlock
- if the fault is raised while map_region already holds that lock, the handler spins on it forever: risk of a deadlock (the locking pattern of the fix is sketched below, before the diff)
Stefan Lankes 2012-08-01 21:12:55 +02:00
parent 80e01f8b9f
commit e2550fbdb2
2 changed files with 17 additions and 0 deletions
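
The pattern applied in both files is: save and disable interrupts first, then take the page-directory lock, and restore the saved interrupt state on every exit path. A minimal sketch of that discipline, using the primitives visible in the diff (the function do_map and its body are illustrative only, not the actual map_region code):

/* Illustrative sketch, not the real map_region: save + disable IRQs before
 * taking pgd_lock, restore them on every return. */
static size_t do_map(spinlock_t* pgd_lock)
{
	uint32_t irqflags;
	size_t ret = 0;

	irqflags = irq_nested_disable();  /* page fault / IRQ handlers may call map_region, */
	spinlock_lock(pgd_lock);          /* so they must not run while we hold the lock    */

	/* ... walk or create the page tables, set ret ... */

	spinlock_unlock(pgd_lock);
	irq_nested_enable(irqflags);      /* re-enable IRQs only after the unlock */
	return ret;
}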


@@ -267,6 +267,7 @@ size_t map_region(size_t viraddr, size_t phyaddr, uint32_t npages, uint32_t flag
page_table_t* pgt;
size_t index, i;
size_t ret;
uint32_t irqflags;
if (BUILTIN_EXPECT(!task || !task->pgd, 0))
return 0;
@@ -279,12 +280,16 @@ size_t map_region(size_t viraddr, size_t phyaddr, uint32_t npages, uint32_t flag
else
pgd_lock = &task->pgd_lock;
// avoid interrupts because the IRQ handler is able to call map_region
irqflags = irq_nested_disable();
spinlock_lock(pgd_lock);
if (!viraddr) {
viraddr = vm_alloc(npages, flags);
if (BUILTIN_EXPECT(!viraddr, 0)) {
spinlock_unlock(pgd_lock);
irq_nested_enable(irqflags);
kputs("map_adress: found no valid virtual address\n");
return 0;
}
@@ -301,6 +306,7 @@ size_t map_region(size_t viraddr, size_t phyaddr, uint32_t npages, uint32_t flag
pgt = (page_table_t*) get_pages(1);
if (BUILTIN_EXPECT(!pgt, 0)) {
spinlock_unlock(pgd_lock);
irq_nested_enable(irqflags);
kputs("map_address: out of memory\n");
return 0;
}
@@ -321,6 +327,7 @@ size_t map_region(size_t viraddr, size_t phyaddr, uint32_t npages, uint32_t flag
if (BUILTIN_EXPECT(!pgt_container, 0)) {
spinlock_unlock(pgd_lock);
irq_nested_enable(irqflags);
kputs("map_address: internal error\n");
return 0;
}
@@ -342,6 +349,7 @@ size_t map_region(size_t viraddr, size_t phyaddr, uint32_t npages, uint32_t flag
index = (viraddr >> 12) & 0x3FF;
if (pgt->entries[index] && !(flags & MAP_REMAP)) {
spinlock_unlock(pgd_lock);
irq_nested_enable(irqflags);
kprintf("0x%x is already mapped\n", viraddr);
return 0;
}
@@ -382,6 +390,7 @@ size_t map_region(size_t viraddr, size_t phyaddr, uint32_t npages, uint32_t flag
}
spinlock_unlock(pgd_lock);
irq_nested_enable(irqflags);
return ret;
}
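
Both files repeat the same save/disable + lock and unlock + restore pairs. A possible follow-up, not part of this commit, would be to bundle the two steps into irqsave helpers so a caller cannot forget to restore the interrupt state; a hypothetical sketch using the same primitives:

/* Hypothetical helpers, not present in this commit or in the tree as shown. */
static inline uint32_t spinlock_lock_irqsave(spinlock_t* lock)
{
	uint32_t flags = irq_nested_disable(); /* save + disable IRQs first */
	spinlock_lock(lock);
	return flags;
}

static inline void spinlock_unlock_irqrestore(spinlock_t* lock, uint32_t flags)
{
	spinlock_unlock(lock);
	irq_nested_enable(flags);              /* restore IRQ state after the unlock */
}

With these, every early return in map_region would need a single call instead of the unlock/enable pair.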


@@ -163,6 +163,7 @@ size_t map_region(size_t viraddr, size_t phyaddr, uint32_t npages, uint32_t flag
spinlock_t* pgd_lock;
page_table_t* pgt;
size_t i, ret;
uint32_t irqflags;
if (BUILTIN_EXPECT(!task || !task->pgd, 0))
return 0;
@@ -175,6 +176,8 @@ size_t map_region(size_t viraddr, size_t phyaddr, uint32_t npages, uint32_t flag
else
pgd_lock = &task->pgd_lock;
// avoid interrupts because the IRQ handler is able to call map_region
irqflags = irq_nested_disable();
spinlock_lock(pgd_lock);
if (!viraddr) {
@@ -196,6 +199,7 @@ size_t map_region(size_t viraddr, size_t phyaddr, uint32_t npages, uint32_t flag
pgt = (page_table_t*) (task->pgd->entries[idx_pd4] & PAGE_MASK);
if (!pgt) {
spinlock_unlock(pgd_lock);
irq_nested_enable(irqflags);
kputs("map_region: out of memory\n");
return 0;
}
@@ -203,6 +207,7 @@ size_t map_region(size_t viraddr, size_t phyaddr, uint32_t npages, uint32_t flag
pgt = (page_table_t*) (pgt->entries[idx_dirp] & PAGE_MASK);
if (!pgt) {
spinlock_unlock(pgd_lock);
irq_nested_enable(irqflags);
kputs("map_region: out of memory\n");
return 0;
}
@@ -210,6 +215,7 @@ size_t map_region(size_t viraddr, size_t phyaddr, uint32_t npages, uint32_t flag
pgt = (page_table_t*) (pgt->entries[idx_dir] & PAGE_MASK);
if (!pgt) {
spinlock_unlock(pgd_lock);
irq_nested_enable(irqflags);
kputs("map_region: out of memory\n");
return 0;
}
@@ -222,6 +228,7 @@ size_t map_region(size_t viraddr, size_t phyaddr, uint32_t npages, uint32_t flag
if (pgt->entries[idx_table] && !(flags & MAP_REMAP)) {
spinlock_unlock(pgd_lock);
irq_nested_enable(irqflags);
kprintf("0x%x is already mapped\n", viraddr);
return 0;
}
@@ -247,6 +254,7 @@ size_t map_region(size_t viraddr, size_t phyaddr, uint32_t npages, uint32_t flag
}
spinlock_unlock(pgd_lock);
irq_nested_enable(irqflags);
return ret;
}
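
Each failure path in the diff has to repeat spinlock_unlock(pgd_lock) followed by irq_nested_enable(irqflags) before returning 0. An alternative structure, shown here only as an illustration and not as the author's code, is a single exit label so the unlock/restore pair appears exactly once:

/* Illustrative only: single exit path for unlock + IRQ restore. */
static size_t map_one_page_sketch(spinlock_t* pgd_lock, page_table_t* pgt)
{
	uint32_t irqflags = irq_nested_disable();
	size_t ret = 0;

	spinlock_lock(pgd_lock);

	if (BUILTIN_EXPECT(!pgt, 0)) {
		kputs("map_region: out of memory\n");
		goto out;                 /* ret stays 0, signalling failure */
	}

	/* ... fill in the page-table entry and set ret to the virtual address ... */

out:
	spinlock_unlock(pgd_lock);
	irq_nested_enable(irqflags);      /* always restore the saved IRQ state */
	return ret;
}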