metalsvm/mm/memory.c
stefan 45219bce2e - huge commit
- enable paging support
- redesign of the APIC code

TODO:
- Currently, we are not able to start user-level applications.
- The RTL8139 driver no longer works; perhaps there is a bug in the output function.
- The APIC code doesn't work on all systems; therefore, it is currently disabled.



git-svn-id: http://svn.lfbs.rwth-aachen.de/svn/scc/trunk/MetalSVM@326 315a16e6-25f9-4109-90ae-ca3045a26c18
2010-12-10 06:16:58 +00:00

/*
* Copyright 2010 Stefan Lankes, Chair for Operating Systems,
* RWTH Aachen University
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* This file is part of MetalSVM.
*/
#include <metalsvm/stdio.h>
#include <metalsvm/string.h>
#include <metalsvm/stdlib.h>
#include <metalsvm/mmu.h>
#include <metalsvm/spinlock.h>
#include <metalsvm/time.h>
#include <metalsvm/elf.h>
#include <metalsvm/processor.h>
#include <metalsvm/page.h>
#ifdef CONFIG_MULTIBOOT
#include <asm/multiboot.h>
#endif
#ifdef CONFIG_ROCKCREEK
#include <asm/scc.h>
#endif
/*
* 0 => free
* 1 => occupied
*
* Set whole address space as occupied
*/
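/* note: the "[first ... last]" range designator used below is a GCC extension */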
static uint8_t bitmap[BITMAP_SIZE] = {[0 ... BITMAP_SIZE-1] = 0xFF};
static spinlock_t bitmap_lock = SPINLOCK_INIT;
static size_t alloc_start;
atomic_int32_t total_pages = ATOMIC_INIT(0);
atomic_int32_t total_allocated_pages = ATOMIC_INIT(0);
atomic_int32_t total_available_pages = ATOMIC_INIT(0);
atomic_int32_t total_kernel_pages = ATOMIC_INIT(0);
/*
 * Note that linker symbols are not variables; they have no memory allocated
 * for maintaining a value. Rather, their address is their value.
 */
extern const void kernel_start;
extern const void kernel_end;
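/*
 * Example: the size of the kernel image is therefore computed from the
 * addresses of these symbols, as done in mmu_init() below:
 *
 *   size_t kernel_size = (size_t) &kernel_end - (size_t) &kernel_start;
 */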
inline static int page_marked(unsigned int i)
{
	unsigned int index = i / 8;
	unsigned int mod = i % 8;

	return (bitmap[index] & (1 << mod));
}

inline static int page_unmarked(unsigned int i)
{
	return !page_marked(i);
}

inline static void page_set_mark(unsigned int i)
{
	unsigned int index = i / 8;
	unsigned int mod = i % 8;

	//if (page_marked(i))
	//	kprintf("page %u is already marked\n", i);

	bitmap[index] = bitmap[index] | (1 << mod);
}

inline static void page_clear_mark(unsigned int i)
{
	unsigned int index = i / 8;
	unsigned int mod = i % 8;

	if (page_unmarked(i))
		kprintf("page %u is already unmarked\n", i);

	bitmap[index] = bitmap[index] & ~(1 << mod);
}
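
/*
 * Example: page 11 is tracked in bitmap[1], bit 3 (11 / 8 = 1, 11 % 8 = 3):
 *
 *   page_set_mark(11);    // bitmap[1] |= 0x08
 *   page_clear_mark(11);  // bitmap[1] &= ~0x08
 *   page_marked(11);      // returns bitmap[1] & 0x08
 */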

int mmu_init(void)
{
	size_t kernel_size;
	unsigned int i;
	size_t addr, end_addr;

#ifdef CONFIG_MULTIBOOT
	if (mb_info && (mb_info->flags & (1 << 6))) {
		multiboot_memory_map_t* mmap = (multiboot_memory_map_t*) mb_info->mmap_addr;
		multiboot_memory_map_t* mmap_end = (void*) ((size_t) mb_info->mmap_addr + mb_info->mmap_length);

		while (mmap < mmap_end) {
			if (mmap->type == MULTIBOOT_MEMORY_AVAILABLE) {
				/* set the available memory as "unused" */
				addr = mmap->addr;
				end_addr = addr + mmap->len;

				while (addr < end_addr) {
					page_clear_mark(addr / PAGE_SIZE);
					addr += PAGE_SIZE;
					atomic_int32_inc(&total_pages);
					atomic_int32_inc(&total_available_pages);
				}
			}
			/*
			 * Memory map entries are variable-sized. Per the Multiboot
			 * specification, the size field does not count itself, so the
			 * next entry starts size + sizeof(size) bytes further on.
			 */
			mmap = (multiboot_memory_map_t*) ((size_t) mmap + sizeof(mmap->size) + mmap->size);
		}
	} else {
		kputs("Unable to initialize the memory management subsystem\n");
		while(1) {
			NOP8;
		}
	}

	/*
	 * Modules like the init ram disk are already loaded.
	 * Therefore, we set these pages as used.
	 */
	if (mb_info && (mb_info->flags & (1 << 3))) {
		multiboot_module_t* mmodule = (multiboot_module_t*) mb_info->mods_addr;

		for(i=0; i<mb_info->mods_count; i++, mmodule++) {
			for(addr=mmodule->mod_start; addr<mmodule->mod_end; addr+=PAGE_SIZE) {
				page_set_mark(addr / PAGE_SIZE);
				atomic_int32_inc(&total_allocated_pages);
				atomic_int32_dec(&total_available_pages);
			}
		}
	}
#elif defined(CONFIG_ROCKCREEK)
	for(i=0; i<SCC_PMEM_REGIONS; i++) {
		addr = scc_info.private_mem[i].low;
		end_addr = scc_info.private_mem[i].high;

		while(addr < end_addr) {
			page_clear_mark(addr / PAGE_SIZE);
			/* stop if the next step would wrap around the address space */
			if (addr + PAGE_SIZE < addr)
				break;
			addr += PAGE_SIZE;
			atomic_int32_inc(&total_pages);
			atomic_int32_inc(&total_available_pages);
		}
	}
#else
#error Currently, MetalSVM supports only the Multiboot specification or the RockCreek processor!
#endif

	kernel_size = (size_t) &kernel_end - (size_t) &kernel_start;
	if (kernel_size % PAGE_SIZE)
		kernel_size += PAGE_SIZE - kernel_size % PAGE_SIZE;
	atomic_int32_add(&total_allocated_pages, kernel_size/PAGE_SIZE);
	atomic_int32_sub(&total_available_pages, kernel_size/PAGE_SIZE);

	/* set kernel space as used */
	for(i=(size_t) &kernel_start / PAGE_SIZE; i < (size_t) &kernel_end / PAGE_SIZE; i++)
		page_set_mark(i);
	if ((size_t) &kernel_end % PAGE_SIZE)
		page_set_mark(i);

	alloc_start = (size_t) &kernel_end / PAGE_SIZE;
	if ((size_t) &kernel_end % PAGE_SIZE)
		alloc_start++;

	return paging_init();
}
/*
* Use first fit algorithm to find a suitable physical memory region
*/
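/*
 * Example (assuming alloc_start is 0): with the bitmap state 1 1 0 0 1 0 0 0
 * for pages 0-7 and npages = 3, the search skips the marked pages 0-1, gives
 * up on the gap at pages 2-3 because page 4 is marked, restarts behind page 4
 * and returns 5*PAGE_SIZE, marking pages 5-7 as occupied.
 */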
static size_t task_get_pages(task_t* task, uint32_t npages)
{
	uint32_t i, j, l;
	uint32_t k = 0;
	size_t ret = 0;

	if (BUILTIN_EXPECT(!npages, 0))
		return ret;

	spinlock_lock(&bitmap_lock);

	i = alloc_start;
next_try:
	/* the bitmap tracks one page per bit => 8*BITMAP_SIZE pages in total */
	while((k < BITMAP_SIZE*8) && page_marked(i)) {
		k++;
		i = (i+1) % (BITMAP_SIZE*8);
	}

	if (k >= BITMAP_SIZE*8)
		goto oom;

	for(j=1; (j<npages) && (i+j < BITMAP_SIZE*8) && (k < BITMAP_SIZE*8); j++, k++) {
		if (page_marked(i+j)) {
			i = (i+j) % (BITMAP_SIZE*8);
			goto next_try;
		}
	}

	if (i+j >= BITMAP_SIZE*8) {
		i = 0;
		goto next_try;
	}

	if (k >= BITMAP_SIZE*8)
		goto oom;

	ret = i*PAGE_SIZE;
	//kprintf("alloc: ret 0x%x, i = %d, j = %d, npages = %d\n", ret, i, j, npages);
	for(l=i; l<i+j; l++)
		page_set_mark(l);
	alloc_start = i+j;

	spinlock_unlock(&bitmap_lock);

	atomic_int32_add(&total_allocated_pages, npages);
	atomic_int32_sub(&total_available_pages, npages);

	if (task && task->ustack)
		atomic_int32_add(&(task->mem_usage), npages);
	else
		atomic_int32_add(&total_kernel_pages, npages);

	return ret;

oom:
	spinlock_unlock(&bitmap_lock);

	return ret;
}

size_t get_pages(uint32_t npages)
{
	return task_get_pages(per_core(current_task), npages);
}

void* mem_allocation(size_t sz, uint32_t flags)
{
	size_t phyaddr, viraddr;
	uint32_t npages = sz / PAGE_SIZE;
	task_t* task = per_core(current_task);

	if (sz % PAGE_SIZE)
		npages++;

	phyaddr = task_get_pages(task, npages);
	if (BUILTIN_EXPECT(!phyaddr, 0))
		return 0;

	spinlock_lock(task->pgd_lock);
	viraddr = map_region(task, 0, phyaddr, npages, flags);
	spinlock_unlock(task->pgd_lock);

	return (void*) viraddr;
}
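
/*
 * kmalloc() derives the mapping flags from the calling context: a task that
 * owns a user-level stack gets a user-space heap mapping, while kernel
 * threads get a kernel-space mapping.
 */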
void* kmalloc(size_t sz)
{
	uint32_t flags;
	task_t* task = per_core(current_task);

	if (task->ustack)
		flags = MAP_USER_SPACE|MAP_HEAP;
	else
		flags = MAP_KERNEL_SPACE|MAP_HEAP;

	return mem_allocation(sz, flags);
}
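
/*
 * Usage sketch (sizes are illustrative): allocations are page-granular, so
 * even small requests consume at least one page.
 *
 *   char* buf = kmalloc(2*PAGE_SIZE + 1);  // rounds up to 3 pages
 *   if (buf)
 *       kfree(buf, 2*PAGE_SIZE + 1);       // the caller must pass the size
 */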

void* create_stack(task_t* task, size_t sz)
{
	size_t viraddr;
	uint32_t npages = sz / PAGE_SIZE;

	if (sz % PAGE_SIZE)
		npages++;

	size_t addr = (size_t) task_get_pages(task, npages);
	if (BUILTIN_EXPECT(!addr, 0))
		return 0;

	/*
	 * We need only user-level stacks. Kernel stacks are already initialized
	 * as a static array.
	 */
	spinlock_lock(task->pgd_lock);
	viraddr = map_region(task, 0, addr, npages, MAP_USER_SPACE|MAP_STACK);
	spinlock_unlock(task->pgd_lock);

	/* fill the stack with a recognizable pattern to ease debugging */
	memset((unsigned char*)viraddr, 0xCD, sz);

	return (void*) viraddr;
}
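
/*
 * Usage sketch (illustration only; the size is an example): allocate a
 * 16 KiB user-level stack for a freshly created task.
 *
 *   void* stack = create_stack(task, 16*1024);
 *   if (BUILTIN_EXPECT(!stack, 0))
 *       ; // handle the out-of-memory case
 */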

static void task_free(task_t* task, void* addr, size_t sz)
{
	uint32_t index, npages, i;
	size_t phyaddr;

	if (BUILTIN_EXPECT(!addr || !sz, 0))
		return;

	npages = sz / PAGE_SIZE;
	if (sz % PAGE_SIZE)
		npages++;

	/*
	 * Resolve the physical pages while the mapping still exists,
	 * then release the virtual memory region.
	 */
	spinlock_lock(&bitmap_lock);
	for(i=0; i<npages; i++) {
		phyaddr = virt_to_phys(task, (size_t) addr+i*PAGE_SIZE);
		index = phyaddr / PAGE_SIZE;
		/* release the physical page; page_unmarked() would only test the bit */
		page_clear_mark(index);
	}
	spinlock_unlock(&bitmap_lock);

	spinlock_lock(task->pgd_lock);
	vm_free(task, (size_t) addr, npages);
	spinlock_unlock(task->pgd_lock);

	atomic_int32_sub(&total_allocated_pages, npages);
	atomic_int32_add(&total_available_pages, npages);

	if (task && task->ustack)
		atomic_int32_sub(&(task->mem_usage), npages);
	else
		atomic_int32_sub(&total_kernel_pages, npages);
}

void kfree(void* addr, size_t sz)
{
	task_free(per_core(current_task), addr, sz);
}
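
/*
 * Note: unlike the C library's free(), kfree() takes the allocation size as
 * a second argument, because the allocator stores no per-allocation header
 * from which it could recover the size on its own.
 */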