397 lines
9.6 KiB
C
397 lines
9.6 KiB
C
/*
 * Copyright 2010 Stefan Lankes, Chair for Operating Systems,
 *                RWTH Aachen University
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * This file is part of MetalSVM.
 */
|
|
|
|
#include <metalsvm/stdio.h>
|
|
#include <metalsvm/string.h>
|
|
#include <metalsvm/stdlib.h>
|
|
#include <metalsvm/mmu.h>
|
|
#include <metalsvm/spinlock.h>
|
|
#include <metalsvm/time.h>
|
|
#include <metalsvm/processor.h>
|
|
#include <metalsvm/page.h>
|
|
#include <metalsvm/errno.h>
|
|
#ifdef CONFIG_MULTIBOOT
|
|
#include <asm/multiboot.h>
|
|
#endif
|
|
#ifdef CONFIG_ROCKCREEK
|
|
#include <asm/RCCE.h>
|
|
#include <asm/RCCE_lib.h>
|
|
#include <asm/SCC_API.h>
|
|
#include <asm/icc.h>
|
|
#endif
|
|
|
|
/*
 * Physical page-frame bitmap, one bit per page:
 * 0 => free
 * 1 => occupied
 *
 * Set whole address space as occupied; mmu_init() later clears the
 * bits of the memory that is actually available.
 */
static uint8_t bitmap[BITMAP_SIZE]; // = {[0 ... BITMAP_SIZE-1] = 0xFF};
// protects bitmap and alloc_start against concurrent allocation/free
static spinlock_t bitmap_lock = SPINLOCK_INIT;
// page index where get_pages() resumes its first-fit scan
static size_t alloc_start;
atomic_int32_t total_pages = ATOMIC_INIT(0);
atomic_int32_t total_allocated_pages = ATOMIC_INIT(0);
atomic_int32_t total_available_pages = ATOMIC_INIT(0);

/*
 * Note that linker symbols are not variables, they have no memory allocated for
 * maintaining a value, rather their address is their value.
 */
extern const void kernel_start;
extern const void kernel_end;
|
|
|
|
inline static int page_marked(size_t i)
|
|
{
|
|
size_t index = i >> 3;
|
|
size_t mod = i & 0x7;
|
|
|
|
return (bitmap[index] & (1 << mod));
|
|
}
|
|
|
|
/*
 * Test whether page frame i is flagged as free.
 * Returns 1 if the page is not marked, 0 otherwise.
 */
inline static int page_unmarked(size_t i)
{
	return page_marked(i) ? 0 : 1;
}
|
|
|
|
inline static void page_set_mark(size_t i)
|
|
{
|
|
size_t index = i >> 3;
|
|
size_t mod = i & 0x7;
|
|
|
|
//if (page_marked(i))
|
|
// kprintf("page %u is alread marked\n", i);
|
|
|
|
bitmap[index] = bitmap[index] | (1 << mod);
|
|
}
|
|
|
|
inline static void page_clear_mark(size_t i)
|
|
{
|
|
size_t index = i / 8;
|
|
size_t mod = i % 8;
|
|
|
|
if (page_unmarked(i))
|
|
kprintf("page %u is already unmarked\n", i);
|
|
|
|
bitmap[index] = bitmap[index] & ~(1 << mod);
|
|
}
|
|
|
|
/*
 * Initialize the physical memory manager.
 *
 * Marks the whole bitmap as occupied, then frees the regions reported as
 * available (Multiboot memory map or RockCreek private-memory slots),
 * re-marks the kernel image, boot data and preloaded modules as used,
 * keeps the global page counters in sync, and finally starts paging.
 *
 * Returns 0 on success or the (negative) error code of paging_init().
 */
int mmu_init(void)
{
	size_t kernel_size;
	unsigned int i;
	size_t addr;
	int ret = 0;

	// at first, set default value of the bitmap: everything occupied
	memset(bitmap, 0xFF, sizeof(uint8_t)*BITMAP_SIZE);

#ifdef CONFIG_MULTIBOOT
	if (mb_info && (mb_info->flags & MULTIBOOT_INFO_MEM_MAP)) {
		size_t end_addr;
		multiboot_memory_map_t* mmap = (multiboot_memory_map_t*) ((size_t) mb_info->mmap_addr);
		multiboot_memory_map_t* mmap_end = (void*) ((size_t) mb_info->mmap_addr + mb_info->mmap_length);

		// walk the bootloader-provided memory map
		while (mmap < mmap_end) {
			if (mmap->type == MULTIBOOT_MEMORY_AVAILABLE) {
				/* set the available memory as "unused" */
				addr = mmap->addr;
				end_addr = addr + mmap->len;

				while (addr < end_addr) {
					page_clear_mark(addr >> PAGE_SHIFT);
					addr += PAGE_SIZE;
					atomic_int32_inc(&total_pages);
					atomic_int32_inc(&total_available_pages);
				}
			}
			mmap++;
		}
	} else {
		// without a memory map we cannot know what RAM exists: halt
		kputs("Unable to initialize the memory management subsystem\n");
		while(1) {
			HALT;
		}
	}
#elif defined(CONFIG_ROCKCREEK)
	/* of course, the first slots belong to the private memory */
	for(addr=0x00; addr<1*0x1000000; addr+=PAGE_SIZE) {
		page_clear_mark(addr >> PAGE_SHIFT);
		// stop if the address would wrap around
		if (addr > addr + PAGE_SIZE)
			break;
		atomic_int32_inc(&total_pages);
		atomic_int32_inc(&total_available_pages);
	}

	// Note: The last slot belongs always to the private memory.
	for(addr=0xFF000000; addr<0xFFFFFFFF; addr+=PAGE_SIZE) {
		page_clear_mark(addr >> PAGE_SHIFT);
		// stop if the address would wrap around
		if (addr > addr + PAGE_SIZE)
			break;
		atomic_int32_inc(&total_pages);
		atomic_int32_inc(&total_available_pages);
	}

	/*
	 * Mark the bootinfo as used.
	 */
	page_set_mark((size_t)bootinfo >> PAGE_SHIFT);
	atomic_int32_inc(&total_allocated_pages);
	atomic_int32_dec(&total_available_pages);
#else
#error Currently, MetalSVM supports only the Multiboot specification or the RockCreek processor!
#endif

	// round the kernel image size up to a whole number of pages
	kernel_size = (size_t) &kernel_end - (size_t) &kernel_start;
	if (kernel_size & (PAGE_SIZE-1))
		kernel_size += PAGE_SIZE - (kernel_size & (PAGE_SIZE-1));
	atomic_int32_add(&total_allocated_pages, kernel_size >> PAGE_SHIFT);
	atomic_int32_sub(&total_available_pages, kernel_size >> PAGE_SHIFT);

	/* set kernel space as used */
	for(i=(size_t) &kernel_start >> PAGE_SHIFT; i < (size_t) &kernel_end >> PAGE_SHIFT; i++)
		page_set_mark(i);
	// the partially used last page of the kernel counts as used, too
	if ((size_t) &kernel_end & (PAGE_SIZE-1))
		page_set_mark(i);

	// first-fit scans start right after the kernel image
	alloc_start = (size_t) &kernel_end >> PAGE_SHIFT;
	if ((size_t) &kernel_end & (PAGE_SIZE-1))
		alloc_start++;

#if MAX_CORES > 1
	// reserve physical page for SMP boot code
	page_set_mark(SMP_SETUP_ADDR >> PAGE_SHIFT);
	atomic_int32_add(&total_allocated_pages, 1);
	atomic_int32_sub(&total_available_pages, 1);
#endif

	ret = paging_init();
	if (ret) {
		kprintf("Failed to initialize paging: %d\n", ret);
		return ret;
	}

#ifdef CONFIG_MULTIBOOT
	/*
	 * Modules like the init ram disk are already loaded.
	 * Therefore, we set these pages as used.
	 */
	if (mb_info && (mb_info->flags & MULTIBOOT_INFO_MODS)) {
		multiboot_module_t* mmodule = (multiboot_module_t*) ((size_t) mb_info->mods_addr);

		/*
		 * Mark the mb_info as used.
		 */
		page_set_mark((size_t)mb_info >> PAGE_SHIFT);
		atomic_int32_inc(&total_allocated_pages);
		atomic_int32_dec(&total_available_pages);

		// mark the module descriptor array itself
		for(addr = mb_info->mods_addr; addr < mb_info->mods_addr + mb_info->mods_count * sizeof(multiboot_module_t); addr += PAGE_SIZE) {
			page_set_mark(addr >> PAGE_SHIFT);
			atomic_int32_inc(&total_allocated_pages);
			atomic_int32_dec(&total_available_pages);
		}

		// mark the pages occupied by each module's payload
		for(i=0; i<mb_info->mods_count; i++, mmodule++) {
			for(addr=mmodule->mod_start; addr<mmodule->mod_end; addr+=PAGE_SIZE) {
				page_set_mark(addr >> PAGE_SHIFT);
				atomic_int32_inc(&total_allocated_pages);
				atomic_int32_dec(&total_available_pages);
			}
		}
	}
#elif defined(CONFIG_ROCKCREEK)
	/*
	 * Now, we are able to read the FPGA registers and to
	 * determine the number of slots for private memory.
	 */
	uint32_t slots = *((volatile uint8_t*) (FPGA_BASE + 0x8244));
	if (slots == 0)
		slots = 1;

	kprintf("MetalSVM use %d slots for private memory\n", slots);

	// define the residual private slots as free
	for(addr=1*0x1000000; addr<slots*0x1000000; addr+=PAGE_SIZE) {
		page_clear_mark(addr >> PAGE_SHIFT);
		// stop if the address would wrap around
		if (addr > addr + PAGE_SIZE)
			break;
		atomic_int32_inc(&total_pages);
		atomic_int32_inc(&total_available_pages);
	}

	/*
	 * The init ram disk are already loaded.
	 * Therefore, we set these pages as used.
	 */
	for(addr=bootinfo->addr; addr < bootinfo->addr+bootinfo->size; addr+=PAGE_SIZE) {
		// This area is already mapped, so we need to virt_to_phys() these addresses.
		page_set_mark(virt_to_phys(addr) >> PAGE_SHIFT);
		atomic_int32_inc(&total_allocated_pages);
		atomic_int32_dec(&total_available_pages);
	}
#endif

	return ret;
}
|
|
|
|
/*
 * Use first fit algorithm to find a suitable physical memory region
 * of npages contiguous page frames.
 *
 * Returns the physical base address of the region, or 0 on failure
 * (zero-sized request, not enough free pages, or no contiguous run).
 */
size_t get_pages(uint32_t npages)
{
	uint32_t i, j, l;
	uint32_t k = 0;   // bitmap slots inspected so far; bounds the scan to one full pass
	size_t ret = 0;   // result address; stays 0 on failure

	// nothing to allocate
	if (BUILTIN_EXPECT(!npages, 0))
		return ret;

	// fast fail: request exceeds what is currently available at all
	if (BUILTIN_EXPECT(npages > atomic_int32_read(&total_available_pages), 0))
		return ret;

	spinlock_lock(&bitmap_lock);
	// resume scanning where the previous allocation left off
	i = alloc_start;
next_try:
	// skip occupied pages until a free one is found (wrapping at BITMAP_SIZE)
	while((k < BITMAP_SIZE) && page_marked(i)) {
		k++;
		i = (i+1) & (BITMAP_SIZE-1);
	}

	// a whole pass found nothing free
	if (k >= BITMAP_SIZE)
		goto oom;

	// verify that the next npages-1 frames are free as well;
	// on the first occupied frame, restart the search just past it
	for(j=1; (j<npages) && (i+j < BITMAP_SIZE) && (k < BITMAP_SIZE); j++, k++) {
		if (page_marked(i+j)) {
			i = (i+j) & (BITMAP_SIZE-1);
			goto next_try;
		}
	}

	// the candidate run ran off the end of the bitmap: wrap and retry
	if (i+j >= BITMAP_SIZE) {
		i = 0;
		goto next_try;
	}

	if (k >= BITMAP_SIZE)
		goto oom;

	// success: j == npages here; mark the run as occupied
	ret = i*PAGE_SIZE;
	//kprintf("alloc: ret 0x%x, i = %d, j = %d, npages = %d\n", ret, i, j, npages);
	for(l=i; l<i+j; l++)
		page_set_mark(l);

	alloc_start = i+j;
	spinlock_unlock(&bitmap_lock);

	atomic_int32_add(&total_allocated_pages, npages);
	atomic_int32_sub(&total_available_pages, npages);

	return ret;

oom:
	spinlock_unlock(&bitmap_lock);

	return ret;
}
|
|
|
|
int put_page(size_t phyaddr)
|
|
{
|
|
uint32_t index = phyaddr >> PAGE_SHIFT;
|
|
|
|
if (BUILTIN_EXPECT(!phyaddr, 0))
|
|
return -EINVAL;
|
|
|
|
spinlock_lock(&bitmap_lock);
|
|
page_clear_mark(index);
|
|
spinlock_unlock(&bitmap_lock);
|
|
|
|
atomic_int32_sub(&total_allocated_pages, 1);
|
|
atomic_int32_add(&total_available_pages, 1);
|
|
|
|
return 0;
|
|
}
|
|
|
|
void* mem_allocation(size_t sz, uint32_t flags)
|
|
{
|
|
size_t phyaddr, viraddr;
|
|
uint32_t npages = sz >> PAGE_SHIFT;
|
|
|
|
if (sz & (PAGE_SIZE-1))
|
|
npages++;
|
|
|
|
phyaddr = get_pages(npages);
|
|
if (BUILTIN_EXPECT(!phyaddr, 0))
|
|
return 0;
|
|
|
|
viraddr = map_region(0, phyaddr, npages, flags);
|
|
|
|
return (void*) viraddr;
|
|
}
|
|
|
|
void* kmalloc(size_t sz)
|
|
{
|
|
return mem_allocation(sz, MAP_KERNEL_SPACE);
|
|
}
|
|
|
|
void kfree(void* addr, size_t sz)
|
|
{
|
|
uint32_t index, npages, i;
|
|
size_t phyaddr;
|
|
|
|
if (BUILTIN_EXPECT(!addr && !sz, 0))
|
|
return;
|
|
|
|
npages = sz >> PAGE_SHIFT;
|
|
if (sz & (PAGE_SIZE-1))
|
|
npages++;
|
|
|
|
spinlock_lock(&bitmap_lock);
|
|
for(i=0; i<npages; i++) {
|
|
unmap_region((size_t) addr+i*PAGE_SIZE, 1);
|
|
|
|
phyaddr = virt_to_phys((size_t) addr+i*PAGE_SIZE);
|
|
if (!phyaddr)
|
|
continue;
|
|
|
|
index = phyaddr >> PAGE_SHIFT;
|
|
page_clear_mark(index);
|
|
|
|
}
|
|
spinlock_unlock(&bitmap_lock);
|
|
|
|
vm_free((size_t) addr, npages);
|
|
|
|
atomic_int32_sub(&total_allocated_pages, npages);
|
|
atomic_int32_add(&total_available_pages, npages);
|
|
}
|
|
|
|
void* create_stack(void)
|
|
{
|
|
return kmalloc(KERNEL_STACK_SIZE);
|
|
}
|
|
|
|
/*
 * Release the kernel stack belonging to the given task.
 *
 * @param task task whose stack should be freed
 * @return 0 on success, -EINVAL if task or its stack pointer is invalid
 */
int destroy_stack(task_t* task)
{
	int valid = task && task->stack;

	if (BUILTIN_EXPECT(!valid, 0))
		return -EINVAL;

	kfree(task->stack, KERNEL_STACK_SIZE);
	return 0;
}
|