remove a memory leak and redesign the internal memory accounting

Stefan Lankes 2011-03-04 22:44:53 +01:00
parent c21b1bf8a2
commit 6f092641ba
5 changed files with 88 additions and 56 deletions
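The redesign drops the per-task mem_usage counter, which mixed kernel and user allocations, in favour of user_usage, which only tracks user-space pages: map_region() increments it for MAP_USER_SPACE mappings, copy_page_table() charges copied user pages to the child, and the new unmap_region() decrements it again, while physical frames are accounted solely in the global total_allocated_pages/total_available_pages counters. A minimal sketch of one user page under the new scheme, using only identifiers from the patch below (the wrapper function itself is illustrative, not part of the commit):

/* Hypothetical illustration: one user page through its life cycle under the
 * new accounting; the comments describe where the counters change. */
static void example_user_page(void)
{
	size_t phyaddr, viraddr;

	phyaddr = get_pages(1);               /* physical frame: global counters only */
	viraddr = map_region(0, phyaddr, 1,
	                     MAP_USER_SPACE); /* current task: user_usage++ */
	/* ... the task works on the page ... */
	unmap_region(viraddr, 1);             /* user_usage-- for addresses above KERNEL_SPACE */
	put_page(phyaddr);                    /* physical frame: global counters only */
}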

View file

@@ -25,7 +25,6 @@
#define __ARCH_PAGE_H__
#include <metalsvm/stddef.h>
//#include <metalsvm/tasks_types.h>
#include <metalsvm/stdlib.h>
#define _PAGE_BIT_PRESENT 0 /* is present */
@@ -84,6 +83,11 @@ size_t vm_alloc(uint32_t npages, uint32_t flags);
*/
int vm_free(size_t addr, uint32_t npages);
/*
* Unmaps the physical memory at a specific virtual address
*/
int unmap_region(size_t viraddr, uint32_t npages);
/*
* Maps a physical memory region at a specific virtual address.
* If the virtual address is zero, this function allocates a valid virtual address on demand.
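As a usage note for the interface above: a hedged sketch of the viraddr == 0 convention, with the frame source, flag choice and error handling assumed for illustration rather than taken from the tree.

/* Hypothetical caller: viraddr == 0 asks map_region() to pick a free virtual
 * address; unmap_region() (added by this commit) withdraws the mapping again. */
static int example_map_on_demand(size_t phyaddr)
{
	size_t viraddr;

	viraddr = map_region(0, phyaddr, 1, MAP_NO_CACHE);
	if (BUILTIN_EXPECT(!viraddr, 0))
		return -ENOMEM;

	/* ... access the uncached mapping ... */

	return unmap_region(viraddr, 1);
}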

View file

@@ -65,12 +65,11 @@ page_dir_t* get_boot_pgd(void)
* No PGD locking is needed because only create_pgd uses this function and already holds the
* PGD lock.
*/
inline static size_t copy_page_table(uint32_t pgd_index, page_table_t* pgt, int* counter)
inline static size_t copy_page_table(task_t* task, uint32_t pgd_index, page_table_t* pgt, int* counter)
{
uint32_t i;
page_table_t* new_pgt;
size_t phyaddr;
task_t* curr_task = per_core(current_task);
if (BUILTIN_EXPECT(!pgt, 0))
return 0;
@@ -94,17 +93,12 @@ inline static size_t copy_page_table(uint32_t pgd_index, page_table_t* pgt, int*
new_pgt->entries[i] = phyaddr | (pgt->entries[i] & 0xFFF);
// only the child uses the copy
atomic_int32_sub(&curr_task->mem_usage, 1);
atomic_int32_inc(&task->user_usage);
}
}
phyaddr = virt_to_phys((size_t)new_pgt);
// only the child uses the copy => unmap the copy
if (!vm_free((size_t)new_pgt, 1))
atomic_int32_sub(&curr_task->mem_usage, 1);
return phyaddr;
}
@@ -131,7 +125,6 @@ int create_pgd(task_t* task, int copy)
if (!pgd)
return -ENOMEM;
memset(pgd, 0, sizeof(page_dir_t));
counter++;
// create a new "page table container" for the new task
pgt = kmalloc(sizeof(page_table_t));
@@ -140,7 +133,6 @@ int create_pgd(task_t* task, int copy)
return -ENOMEM;
}
memset(pgt, 0, sizeof(page_table_t));
counter++;
spinlock_lock(&kslock);
@@ -170,7 +162,7 @@ int create_pgd(task_t* task, int copy)
if (!(curr_task->pgd->entries[i]))
continue;
phyaddr = copy_page_table(i, (page_table_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + i*PAGE_SIZE) & 0xFFFFF000), &counter);
phyaddr = copy_page_table(task, i, (page_table_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + i*PAGE_SIZE) & 0xFFFFF000), &counter);
if (phyaddr)
pgd->entries[i] = (phyaddr & 0xFFFFF000) | (curr_task->pgd->entries[i] & 0xFFF);
}
@@ -178,12 +170,6 @@ int create_pgd(task_t* task, int copy)
spinlock_unlock(&curr_task->pgd_lock);
}
// frees the virtual regions, because only the new child task needs access to the new pgd and pgt
//if (!vm_free((size_t)pgt, 1))
// atomic_int32_sub(&curr_task->mem_usage, 1);
//if (!vm_free((size_t)pgd, 1))
// atomic_int32_sub(&curr_task->mem_usage, 1);
return counter;
}
@@ -192,21 +178,22 @@ int create_pgd(task_t* task, int copy)
*/
int drop_pgd(void)
{
uint32_t i;
page_dir_t* pgd = per_core(current_task)->pgd;
size_t phy_pgd = virt_to_phys((size_t) pgd);
task_t* task = per_core(current_task);
uint32_t i;
if (BUILTIN_EXPECT(pgd == &boot_pgd, 0))
return -EINVAL;
spinlock_lock(&task->pgd_lock);
for(i=KERNEL_SPACE/(1024*PAGE_SIZE); i<1024; i++)
for(i=KERNEL_SPACE/(1024*PAGE_SIZE); i<1024; i++) {
if (pgd->entries[i] & 0xFFFFF000) {
put_page(pgd->entries[i] & 0xFFFFF000);
pgd->entries[i] = 0;
}
}
// freeing the page directory
put_page(phy_pgd);
@@ -347,6 +334,9 @@ size_t map_region(size_t viraddr, size_t phyaddr, uint32_t npages, uint32_t flag
if (flags & MAP_NO_CACHE)
pgt->entries[index] |= PG_PCD;
if (flags & MAP_USER_SPACE)
atomic_int32_inc(&task->user_usage);
tlb_flush_one_page(viraddr);
}
@@ -463,6 +453,45 @@ size_t vm_alloc(uint32_t npages, uint32_t flags)
return ret;
}
int unmap_region(size_t viraddr, uint32_t npages)
{
task_t* task = per_core(current_task);
spinlock_t* pgd_lock;
uint32_t i;
uint32_t index1, index2;
page_table_t* pgt;
if (BUILTIN_EXPECT(!task || !task->pgd || !paging_enabled, 0))
return -EINVAL;
if (viraddr <= KERNEL_SPACE)
pgd_lock = &kslock;
else
pgd_lock = &task->pgd_lock;
spinlock_lock(pgd_lock);
for(i=0; i<npages; i++, viraddr+=PAGE_SIZE)
{
index1 = viraddr >> 22;
index2 = (viraddr >> 12) & 0x3FF;
pgt = (page_table_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE) & 0xFFFFF000);
if (!pgt)
continue;
pgt->entries[index2] &= ~PG_PRESENT;
if (viraddr > KERNEL_SPACE)
atomic_int32_dec(&task->user_usage);
tlb_flush_one_page(viraddr);
}
spinlock_unlock(pgd_lock);
return 0;
}
int vm_free(size_t viraddr, uint32_t npages)
{
task_t* task = per_core(current_task);
@@ -490,6 +519,8 @@ int vm_free(size_t viraddr, uint32_t npages)
if (!pgt)
continue;
pgt->entries[index2] = 0;
tlb_flush_one_page(viraddr);
}
spinlock_unlock(pgd_lock);
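The new unmap_region() above only withdraws the present bit and the user_usage accounting; neither the physical frame nor the virtual region is released by it. A hedged sketch of a complete teardown (hypothetical helper; the frame is looked up before the mapping is withdrawn, and put_page(), virt_to_phys() and vm_free() are the existing helpers used elsewhere in this patch):

/* Hypothetical helper, not part of the commit: releases npages pages that
 * were mapped earlier, one page table entry at a time. */
static void example_release_region(size_t viraddr, uint32_t npages)
{
	uint32_t i;
	size_t phyaddr;

	for(i=0; i<npages; i++) {
		phyaddr = virt_to_phys(viraddr + i*PAGE_SIZE); /* find the frame first */
		unmap_region(viraddr + i*PAGE_SIZE, 1);        /* clear PG_PRESENT     */
		if (phyaddr)
			put_page(phyaddr);                     /* return the frame     */
	}

	vm_free(viraddr, npages);                              /* release the virtual region */
}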

View file

@@ -43,7 +43,7 @@ struct page_dir;
typedef struct task {
tid_t id; /* task id = position in the task table */
uint32_t status;
atomic_int32_t mem_usage; /* in number of pages */
atomic_int32_t user_usage; /* in number of pages */
spinlock_t pgd_lock; /* avoids concurrent access to the page directory */
struct page_dir* pgd; /* pointer to the page directory */
spinlock_t vma_lock;

View file

@@ -47,7 +47,7 @@ int multitasking_init(void) {
if (task_table[0].status == TASK_INVALID) {
task_table[0].id = 0;
task_table[0].status = TASK_RUNNING;
atomic_int32_set(&task_table[0].mem_usage, 0);
atomic_int32_set(&task_table[0].user_usage, 0);
mailbox_wait_msg_init(&task_table[0].inbox);
memset(task_table[0].outbox, 0x00, sizeof(mailbox_wait_msg_t*)*MAX_TASKS);
per_core(current_task) = task_table+0;
@@ -97,8 +97,8 @@ static void NORETURN do_exit(int arg) {
drop_pgd(); // delete page directory and its page tables
if (atomic_int32_read(&per_core(current_task)->mem_usage))
kprintf("Memory leak! Task %d did not release %d pages\n", per_core(current_task)->id, atomic_int32_read(&per_core(current_task)->mem_usage));
if (atomic_int32_read(&per_core(current_task)->user_usage))
kprintf("Memory leak! Task %d did not release %d pages\n", per_core(current_task)->id, atomic_int32_read(&per_core(current_task)->user_usage));
per_core(current_task)->status = TASK_FINISHED;
reschedule();
@@ -135,6 +135,8 @@ static int create_task(tid_t* id, entry_point_t ep, void* arg)
for(i=0; i<MAX_TASKS; i++) {
if (task_table[i].status == TASK_INVALID) {
atomic_int32_set(&task_table[i].user_usage, 0);
ret = create_pgd(task_table+i, 0);
if (ret < 0) {
ret = -ENOMEM;
@@ -142,8 +144,6 @@ static int create_task(tid_t* id, entry_point_t ep, void* arg)
}
task_table[i].id = i;
// at least one page is already created for the pgd
atomic_int32_set(&task_table[i].mem_usage, ret);
spinlock_init(&task_table[i].vma_lock);
task_table[i].vma_list = NULL;
mailbox_wait_msg_init(&task_table[i].inbox);
@@ -170,12 +170,17 @@ int sys_fork(void)
{
int ret = -ENOMEM;
unsigned int i;
task_t* parent = per_core(current_task);
task_t* parent_task = per_core(current_task);
vma_t** child;
vma_t* parent;
vma_t* tmp;
spinlock_lock_irqsave(&table_lock);
for(i=0; i<MAX_TASKS; i++) {
if (task_table[i].status == TASK_INVALID) {
atomic_int32_set(&task_table[i].user_usage, 0);
ret = create_pgd(task_table+i, 1);
if (ret < 0) {
ret = -ENOMEM;
@@ -183,32 +188,28 @@ int sys_fork(void)
}
task_table[i].id = i;
// at least one page is already created for the pgd
atomic_int32_set(&task_table[i].mem_usage, ret);
spinlock_init(&task_table[i].vma_lock);
task_table[i].vma_list = NULL;
/*if (copy) {
vma_t** child = &task_table[i].vma_list;
vma_t* parent = per_core(current_task)->vma_list;
vma_t* tmp = NULL;
while(parent) {
*child = (vma_t*) kmalloc(sizeof(vma_t));
if (BUILTIN_EXPECT(!child, 0))
break;
atomic_int32_inc(&task_table[i].mem_usage);
// copy VMA list
child = &task_table[i].vma_list;
parent = per_core(current_task)->vma_list;
tmp = NULL;
(*child)->start = parent->start;
(*child)->end = parent->end;
(*child)->type = parent->type;
(*child)->prev = tmp;
(*child)->next = NULL;
while(parent) {
*child = (vma_t*) kmalloc(sizeof(vma_t));
if (BUILTIN_EXPECT(!child, 0))
break;
parent = parent->next;
tmp = *child;
child = &((*child)->next);
}
}*/
(*child)->start = parent->start;
(*child)->end = parent->end;
(*child)->type = parent->type;
(*child)->prev = tmp;
(*child)->next = NULL;
parent = parent->next;
tmp = *child;
child = &((*child)->next);
}
mailbox_wait_msg_init(&task_table[i].inbox);
memset(task_table[i].outbox, 0x00, sizeof(mailbox_wait_msg_t*)*MAX_TASKS);
@@ -216,7 +217,7 @@ int sys_fork(void)
ret = arch_fork(task_table+i);
if (parent != per_core(current_task))
if (parent_task != per_core(current_task))
return 0; // Oh, the new child! => leave function
if (!ret) {
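sys_fork() above now performs the VMA list copy inline instead of leaving it commented out. A standalone sketch of the same duplication as a hypothetical helper; it assumes the vma_t fields used above and checks kmalloc()'s result on the freshly allocated node before touching it:

/* Hypothetical helper, not part of the commit: duplicates a vma_t list. */
static int example_copy_vma_list(vma_t* parent, vma_t** child)
{
	vma_t* tmp = NULL;

	while(parent) {
		*child = (vma_t*) kmalloc(sizeof(vma_t));
		if (BUILTIN_EXPECT(!*child, 0))
			return -ENOMEM;          /* allocation failed => stop copying */

		(*child)->start = parent->start;
		(*child)->end   = parent->end;
		(*child)->type  = parent->type;
		(*child)->prev  = tmp;
		(*child)->next  = NULL;

		tmp    = *child;
		child  = &((*child)->next);
		parent = parent->next;
	}

	return 0;
}

Called as example_copy_vma_list(parent_task->vma_list, &task_table[i].vma_list), it would leave the child with its own copy of every region.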

View file

@@ -179,7 +179,6 @@ int mmu_init(void)
*/
size_t get_pages(uint32_t npages)
{
task_t* task = per_core(current_task);
uint32_t i, j, l;
uint32_t k = 0;
size_t ret = 0;
@@ -226,7 +225,6 @@ next_try:
atomic_int32_add(&total_allocated_pages, npages);
atomic_int32_sub(&total_available_pages, npages);
atomic_int32_add(&(task->mem_usage), npages);
return ret;
@@ -239,7 +237,6 @@ oom:
int put_page(size_t phyaddr)
{
uint32_t index = phyaddr / PAGE_SIZE;
task_t* task = per_core(current_task);
if (BUILTIN_EXPECT(!phyaddr, 0))
return -EINVAL;
@@ -250,7 +247,6 @@ int put_page(size_t phyaddr)
atomic_int32_sub(&total_allocated_pages, 1);
atomic_int32_add(&total_available_pages, 1);
atomic_int32_sub(&(task->mem_usage), 1);
return 0;
}
@@ -279,7 +275,6 @@ void* kmalloc(size_t sz)
void kfree(void* addr, size_t sz)
{
task_t* task = per_core(current_task);
uint32_t index, npages, i;
size_t phyaddr;
@@ -292,6 +287,8 @@ void kfree(void* addr, size_t sz)
spinlock_lock(&bitmap_lock);
for(i=0; i<npages; i++) {
unmap_region((size_t) addr+i*PAGE_SIZE, 1);
phyaddr = virt_to_phys((size_t) addr+i*PAGE_SIZE);
if (!phyaddr)
continue;
@@ -306,5 +303,4 @@ void kfree(void* addr, size_t sz)
atomic_int32_sub(&total_allocated_pages, npages);
atomic_int32_add(&total_available_pages, npages);
atomic_int32_sub(&(task->mem_usage), npages);
}
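With the mem_usage updates removed from get_pages(), put_page() and kfree(), physical-frame statistics now live only in the global counters, while per-task figures come from user_usage. A hedged sketch of reading both (hypothetical debug helper; it would have to sit in mm/memory.c if total_allocated_pages and total_available_pages are file-local):

/* Hypothetical debug helper, not part of the commit. */
static void example_dump_mem_stats(void)
{
	task_t* task = per_core(current_task);

	kprintf("frames: %d allocated, %d available\n",
		atomic_int32_read(&total_allocated_pages),
		atomic_int32_read(&total_available_pages));
	kprintf("task %d: %d user pages mapped\n",
		task->id, atomic_int32_read(&task->user_usage));
}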