// metalsvm/mm/memory.c
/*
* Copyright 2010 Stefan Lankes, Chair for Operating Systems,
* RWTH Aachen University
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* This file is part of MetalSVM.
*/
#include <metalsvm/stdio.h>
#include <metalsvm/string.h>
#include <metalsvm/stdlib.h>
#include <metalsvm/memory.h>
#include <metalsvm/spinlock.h>
#include <metalsvm/time.h>
#include <metalsvm/processor.h>
#include <metalsvm/page.h>
#include <metalsvm/errno.h>
#ifdef CONFIG_MULTIBOOT
#include <asm/multiboot.h>
#endif
#ifdef CONFIG_ROCKCREEK
#include <asm/RCCE.h>
#include <asm/RCCE_lib.h>
#include <asm/SCC_API.h>
#include <asm/icc.h>
#endif
/*
 * Initially mark the whole address space as occupied:
 * 0 => free, 1 => occupied
 */
static uint8_t bitmap[BITMAP_SIZE] = {[0 ... BITMAP_SIZE-1] = 0xFF};
static spinlock_t bitmap_lock = SPINLOCK_INIT;
atomic_int32_t total_pages = ATOMIC_INIT(0);
atomic_int32_t total_allocated_pages = ATOMIC_INIT(0);
atomic_int32_t total_available_pages = ATOMIC_INIT(0);
/*
 * Note that linker symbols are not variables; they have no memory allocated
 * for maintaining a value. Rather, their address is their value.
 */
extern const void kernel_start;
extern const void kernel_end;
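/*
 * Helper functions for the page frame bitmap. Each bit corresponds to one
 * physical page frame; the index i is the page frame number
 * (physical address >> PAGE_BITS).
 */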
inline static int page_marked(size_t i)
{
size_t index = i >> 3;
size_t mod = i & 0x7;
return (bitmap[index] & (1 << mod));
}
inline static void page_set_mark(size_t i)
{
size_t index = i >> 3;
size_t mod = i & 0x7;
bitmap[index] = bitmap[index] | (1 << mod);
}
inline static void page_clear_mark(size_t i)
{
size_t index = i >> 3;
size_t mod = i & 0x7;
bitmap[index] = bitmap[index] & ~(1 << mod);
}
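/*
 * Allocate npages contiguous page frames via a first-fit search over the
 * bitmap and return the physical address of the first frame.
 * The search starts at page frame 1, so a return value of 0 signals failure.
 */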
size_t get_pages(size_t npages)
{
size_t cnt, off;
if (BUILTIN_EXPECT(!npages, 0))
return 0;
if (BUILTIN_EXPECT(npages > atomic_int32_read(&total_available_pages), 0))
return 0;
spinlock_lock(&bitmap_lock);
off = 1;
while (off <= BITMAP_SIZE*8 - npages) {
for (cnt=0; cnt<npages; cnt++) {
if (page_marked(off+cnt))
goto next;
}
for (cnt=0; cnt<npages; cnt++) {
page_set_mark(off+cnt);
}
spinlock_unlock(&bitmap_lock);
atomic_int32_add(&total_allocated_pages, npages);
atomic_int32_sub(&total_available_pages, npages);
return off << PAGE_BITS;
next: off += cnt+1;
}
spinlock_unlock(&bitmap_lock);
return 0;
}
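/*
 * Release npages page frames starting at the physical address phyaddr.
 * Returns the number of frames that were actually marked as allocated and
 * have now been freed, or -EINVAL on invalid arguments.
 */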
int put_pages(size_t phyaddr, size_t npages)
{
size_t i, ret = 0;
size_t base = phyaddr >> PAGE_BITS;
if (BUILTIN_EXPECT(!phyaddr, 0))
return -EINVAL;
if (BUILTIN_EXPECT(!npages, 0))
return -EINVAL;
spinlock_lock(&bitmap_lock);
for (i=0; i<npages; i++) {
if (page_marked(base+i)) {
page_clear_mark(base+i);
ret++;
}
}
spinlock_unlock(&bitmap_lock);
atomic_int32_sub(&total_allocated_pages, ret);
atomic_int32_add(&total_available_pages, ret);
kprintf("put_pages: phyaddr=%#lx, npages = %d, ret = %d\n",
phyaddr, npages, ret); // TODO: remove
return ret;
}
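/*
 * Copy the content of the page frame at physical address psrc to the page
 * frame at physical address pdest. Both frames are temporarily mapped into
 * a kernel virtual memory area that is allocated on first use and reused
 * afterwards.
 */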
int copy_page(size_t pdest, size_t psrc)
{
static size_t viraddr;
if (!viraddr) { // statically allocate virtual memory area
viraddr = vma_alloc(2 * PAGE_SIZE, VMA_HEAP);
if (BUILTIN_EXPECT(!viraddr, 0))
return -ENOMEM;
}
// map pages
size_t vsrc = map_region(viraddr, psrc, 1, MAP_KERNEL_SPACE);
size_t vdest = map_region(viraddr + PAGE_SIZE, pdest, 1, MAP_KERNEL_SPACE);
if (BUILTIN_EXPECT(!vsrc || !vdest, 0)) {
unmap_region(viraddr, 2);
return -ENOMEM;
}
kprintf("copy_page: copy page frame from: %#lx (%#lx) to %#lx (%#lx)\n", vsrc, psrc, vdest, pdest); // TODO remove
// copy the whole page
memcpy((void*) vdest, (void*) vsrc, PAGE_SIZE);
// housekeeping
unmap_region(viraddr, 2);
return 0;
}
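/*
 * Initialize the memory management subsystem: determine the available
 * physical memory, mark the regions used by the kernel, the boot
 * information and already loaded modules as allocated, and set up
 * paging and the VMA regions.
 */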
int mmu_init(void)
{
unsigned int i;
size_t addr;
int ret = 0;
#ifdef CONFIG_MULTIBOOT
if (mb_info) {
if (mb_info->flags & MULTIBOOT_INFO_MEM_MAP) {
multiboot_memory_map_t* mmap = (multiboot_memory_map_t*) ((size_t) mb_info->mmap_addr);
multiboot_memory_map_t* mmap_end = (void*) ((size_t) mb_info->mmap_addr + mb_info->mmap_length);
// mark available memory as free
while (mmap < mmap_end) {
if (mmap->type == MULTIBOOT_MEMORY_AVAILABLE) {
for (addr=mmap->addr; addr < mmap->addr + mmap->len; addr += PAGE_SIZE) {
page_clear_mark(addr >> PAGE_BITS);
atomic_int32_inc(&total_pages);
atomic_int32_inc(&total_available_pages);
}
}
mmap++;
}
}
else if (mb_info->flags & MULTIBOOT_INFO_MEM) {
size_t page;
size_t pages_lower = mb_info->mem_lower >> 2; /* convert KiB to 4 KiB pages */
size_t pages_upper = mb_info->mem_upper >> 2;
for (page=0; page<pages_lower; page++)
page_clear_mark(page);
for (page=0; page<pages_upper; page++)
page_clear_mark(page + 256); /* 1 MiB == 256 pages offset */
atomic_int32_add(&total_pages, pages_lower + pages_upper);
atomic_int32_add(&total_available_pages, pages_lower + pages_upper);
}
else {
kputs("Unable to initialize the memory management subsystem\n");
while (1) HALT;
}
// mark mb_info as used
page_set_mark((size_t) mb_info >> PAGE_BITS);
atomic_int32_inc(&total_allocated_pages);
atomic_int32_dec(&total_available_pages);
// mark modules list as used
if (mb_info->flags & MULTIBOOT_INFO_MODS) {
for(addr=mb_info->mods_addr; addr<mb_info->mods_addr+mb_info->mods_count*sizeof(multiboot_module_t); addr+=PAGE_SIZE) {
page_set_mark(addr >> PAGE_BITS);
atomic_int32_inc(&total_allocated_pages);
atomic_int32_dec(&total_available_pages);
}
}
}
#elif defined(CONFIG_ROCKCREEK)
// the first slots always belong to the private memory
for(addr=0x00; addr<1*0x1000000; addr+=PAGE_SIZE) {
page_clear_mark(addr >> PAGE_BITS);
if (addr > addr + PAGE_SIZE)
break;
atomic_int32_inc(&total_pages);
atomic_int32_inc(&total_available_pages);
}
// Note: The last slot always belongs to the private memory.
for(addr=0xFF000000; addr<0xFFFFFFFF; addr+=PAGE_SIZE) {
page_clear_mark(addr >> PAGE_BITS);
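// stop before addr + PAGE_SIZE wraps around at the top of the 32-bit address space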
if (addr > addr + PAGE_SIZE)
break;
atomic_int32_inc(&total_pages);
atomic_int32_inc(&total_available_pages);
}
// mark the bootinfo as used.
page_set_mark((size_t)bootinfo >> PAGE_BITS);
atomic_int32_inc(&total_allocated_pages);
atomic_int32_dec(&total_available_pages);
#else
#error Currently, MetalSVM supports only the Multiboot specification or the RockCreek processor!
#endif
// mark kernel as used
for(addr=(size_t) &kernel_start; addr<(size_t) &kernel_end; addr+=PAGE_SIZE) {
page_set_mark(addr >> PAGE_BITS);
atomic_int32_inc(&total_allocated_pages);
atomic_int32_dec(&total_available_pages);
}
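// reserve the page frame at SMP_SETUP_ADDR, which is used for the SMP boot code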
#if MAX_CORES > 1
page_set_mark(SMP_SETUP_ADDR >> PAGE_BITS);
atomic_int32_inc(&total_allocated_pages);
atomic_int32_dec(&total_available_pages);
#endif
// enable paging and map SMP, VGA, Multiboot modules etc.
ret = paging_init();
if (BUILTIN_EXPECT(ret, 0)) {
kprintf("Failed to initialize paging: %d\n", ret);
return ret;
}
ret = vma_init();
if (BUILTIN_EXPECT(ret, 0)) {
kprintf("Failed to initialize VMA regions: %d\n", ret);
return ret;
}
#ifdef CONFIG_MULTIBOOT
/*
 * Modules such as the init ramdisk are already loaded.
 * Therefore, we mark these pages as used.
 */
if (mb_info && (mb_info->flags & MULTIBOOT_INFO_MODS)) {
multiboot_module_t* mmodule = (multiboot_module_t*) ((size_t) mb_info->mods_addr);
for(i=0; i<mb_info->mods_count; i++) {
for(addr=mmodule[i].mod_start; addr<mmodule[i].mod_end; addr+=PAGE_SIZE) {
page_set_mark(addr >> PAGE_BITS);
atomic_int32_inc(&total_allocated_pages);
atomic_int32_dec(&total_available_pages);
}
}
}
#elif defined(CONFIG_ROCKCREEK)
/*
* Now, we are able to read the FPGA registers and to
* determine the number of slots for private memory.
*/
uint32_t slots = *((volatile uint8_t*) (FPGA_BASE + 0x8244));
if (slots == 0)
slots = 1;
kprintf("MetalSVM use %d slots for private memory\n", slots);
// mark the remaining private slots as free
for(addr=1*0x1000000; addr<slots*0x1000000; addr+=PAGE_SIZE) {
page_clear_mark(addr >> PAGE_BITS);
if (addr > addr + PAGE_SIZE)
break;
atomic_int32_inc(&total_pages);
atomic_int32_inc(&total_available_pages);
}
/*
 * The init ramdisk is already loaded.
 * Therefore, we mark these pages as used.
 */
for(addr=bootinfo->addr; addr<bootinfo->addr+bootinfo->size; addr+=PAGE_SIZE) {
// this area is already mapped, so we need to virt_to_phys() these addresses.
page_set_mark(virt_to_phys(addr) >> PAGE_BITS);
atomic_int32_inc(&total_allocated_pages);
atomic_int32_dec(&total_available_pages);
}
#endif
return ret;
}