/*
 * Copyright 2010 Stefan Lankes, Chair for Operating Systems,
 *                               RWTH Aachen University
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * This file is part of MetalSVM.
 */

/*
 * NOTE: the original header names were lost; the include list below is a
 * plausible reconstruction (an assumption) of what this file needs.
 */
#include <metalsvm/stddef.h>
#include <metalsvm/stdio.h>
#include <metalsvm/stdlib.h>
#include <metalsvm/string.h>
#include <metalsvm/spinlock.h>
#include <metalsvm/mmu.h>
#include <metalsvm/page.h>
#include <metalsvm/processor.h>
#include <metalsvm/tasks.h>
#ifdef CONFIG_MULTIBOOT
#include <asm/multiboot.h>
#endif
#ifdef CONFIG_ROCKCREEK
#include <asm/icc.h>
#endif

/*
 * Page frame bitmap:
 *   0 => free
 *   1 => occupied
 *
 * Initially, the whole address space is marked as occupied; mmu_init()
 * clears the bits of the memory that is actually available.
 */
static uint8_t bitmap[BITMAP_SIZE] = {[0 ... BITMAP_SIZE-1] = 0xFF};
static spinlock_t bitmap_lock = SPINLOCK_INIT;
static size_t alloc_start;
atomic_int32_t total_pages = ATOMIC_INIT(0);
atomic_int32_t total_allocated_pages = ATOMIC_INIT(0);
atomic_int32_t total_available_pages = ATOMIC_INIT(0);
atomic_int32_t total_kernel_pages = ATOMIC_INIT(0);

/*
 * Note that linker symbols are not variables: they have no memory allocated
 * for maintaining a value. Rather, their address is their value.
 */
extern const void kernel_start;
extern const void kernel_end;

inline static int page_marked(unsigned int i)
{
    unsigned int index = i / 8;
    unsigned int mod = i % 8;

    return (bitmap[index] & (1 << mod));
}

inline static int page_unmarked(unsigned int i)
{
    return !page_marked(i);
}

inline static void page_set_mark(unsigned int i)
{
    unsigned int index = i / 8;
    unsigned int mod = i % 8;

    //if (page_marked(i))
    //    kprintf("page %u is already marked\n", i);

    bitmap[index] = bitmap[index] | (1 << mod);
}

inline static void page_clear_mark(unsigned int i)
{
    unsigned int index = i / 8;
    unsigned int mod = i % 8;

    if (page_unmarked(i))
        kprintf("page %u is already unmarked\n", i);

    bitmap[index] = bitmap[index] & ~(1 << mod);
}
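/*
 * Worked example (illustrative, not from the original source): page 42 is
 * tracked by bit 42 % 8 = 2 of bitmap[42 / 8] = bitmap[5], so
 * page_set_mark(42) ORs 0x04 into bitmap[5] and page_clear_mark(42) ANDs
 * it back out.
 */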
int mmu_init(void)
{
    size_t kernel_size;
    unsigned int i;
    size_t addr, end_addr;

#ifdef CONFIG_MULTIBOOT
    if (mb_info && (mb_info->flags & (1 << 6))) {
        multiboot_memory_map_t* mmap = (multiboot_memory_map_t*) mb_info->mmap_addr;
        multiboot_memory_map_t* mmap_end = (void*) ((size_t) mb_info->mmap_addr + mb_info->mmap_length);

        while (mmap < mmap_end) {
            if (mmap->type == MULTIBOOT_MEMORY_AVAILABLE) {
                /* set the available memory as "unused" */
                addr = mmap->addr;
                end_addr = addr + mmap->len;

                while (addr < end_addr) {
                    page_clear_mark(addr / PAGE_SIZE);
                    addr += PAGE_SIZE;
                    atomic_int32_inc(&total_pages);
                    atomic_int32_inc(&total_available_pages);
                }
            }
            mmap++;
        }
    } else {
        kputs("Unable to initialize the memory management subsystem\n");
        while(1) {
            NOP8;
        }
    }

    /*
     * Modules like the init ramdisk are already loaded.
     * Therefore, we mark their pages as used.
     */
    if (mb_info && (mb_info->flags & (1 << 3))) {
        multiboot_module_t* mmodule = (multiboot_module_t*) mb_info->mods_addr;

        for(i=0; i<mb_info->mods_count; i++, mmodule++) {
            for(addr=mmodule->mod_start; addr<mmodule->mod_end; addr+=PAGE_SIZE) {
                page_set_mark(addr / PAGE_SIZE);
                atomic_int32_inc(&total_allocated_pages);
                atomic_int32_dec(&total_available_pages);
            }
        }
    }
#elif defined(CONFIG_ROCKCREEK)
    /*
     * NOTE: the SCC-specific body of this branch was lost. Mirroring the
     * multiboot path, it marks the core's private memory as available;
     * ROCKCREEK_PRIVATE_PAGES is a hypothetical placeholder for the number
     * of usable private page frames.
     */
    for(i=0; i<ROCKCREEK_PRIVATE_PAGES; i++) {
        page_clear_mark(i);
        atomic_int32_inc(&total_pages);
        atomic_int32_inc(&total_available_pages);
    }
#endif

    /*
     * NOTE: the tail of this function was lost; it is reconstructed from
     * the declared variables and counters: mark the kernel image itself as
     * used and let the allocator start its search behind the kernel.
     */
    kernel_size = (size_t) &kernel_end - (size_t) &kernel_start;
    if (kernel_size % PAGE_SIZE)
        kernel_size += PAGE_SIZE - kernel_size % PAGE_SIZE;

    for(addr=(size_t) &kernel_start; addr < (size_t) &kernel_start + kernel_size; addr+=PAGE_SIZE)
        page_set_mark(addr / PAGE_SIZE);

    atomic_int32_add(&total_allocated_pages, kernel_size / PAGE_SIZE);
    atomic_int32_sub(&total_available_pages, kernel_size / PAGE_SIZE);
    atomic_int32_add(&total_kernel_pages, kernel_size / PAGE_SIZE);

    alloc_start = ((size_t) &kernel_start + kernel_size) / PAGE_SIZE;

    return 0;
}

/*
 * NOTE: the first half of this function was lost; everything up to the
 * first oom check is reconstructed from the surviving fragments and the
 * variables they use.
 */
size_t task_get_pages(task_t* task, uint32_t npages)
{
    uint32_t i, j, l;
    uint32_t k = 0;
    size_t ret = 0;

    if (BUILTIN_EXPECT(!npages, 0))
        return ret;

    spinlock_lock(&bitmap_lock);

    i = alloc_start;
next_try:
    /* find the first free page frame */
    while ((k < BITMAP_SIZE) && page_marked(i)) {
        k++;
        i = (i+1) % BITMAP_SIZE;
    }

    if (k >= BITMAP_SIZE)
        goto oom;

    /* check whether the following npages-1 frames are free as well */
    for(j=1; (j < npages) && (i+j < BITMAP_SIZE) && page_unmarked(i+j); j++)
        k++;

    if (i+j >= BITMAP_SIZE) {
        i = 0;
        goto next_try;
    }

    if (k >= BITMAP_SIZE)
        goto oom;

    if (j < npages) {
        /* the free run is too short: continue searching behind it */
        i = (i+j) % BITMAP_SIZE;
        goto next_try;
    }

    ret = i*PAGE_SIZE;
    //kprintf("alloc: ret 0x%x, i = %d, j = %d, npages = %d\n", ret, i, j, npages);
    for(l=i; l<i+npages; l++)
        page_set_mark(l);

    alloc_start = i + npages;
    spinlock_unlock(&bitmap_lock);

    atomic_int32_add(&total_allocated_pages, npages);
    atomic_int32_sub(&total_available_pages, npages);

    if (task->ustack)
        atomic_int32_add(&(task->mem_usage), npages);
    else
        atomic_int32_add(&total_kernel_pages, npages);

    return ret;

oom:
    spinlock_unlock(&bitmap_lock);

    return ret;
}

size_t get_pages(uint32_t npages)
{
    return task_get_pages(per_core(current_task), npages);
}

void* mem_allocation(size_t sz, uint32_t flags)
{
    size_t phyaddr, viraddr;
    uint32_t npages = sz / PAGE_SIZE;
    task_t* task = per_core(current_task);

    if (sz % PAGE_SIZE)
        npages++;

    phyaddr = task_get_pages(task, npages);
    if (BUILTIN_EXPECT(!phyaddr, 0))
        return 0;

    spinlock_lock(task->pgd_lock);
    viraddr = map_region(task, 0, phyaddr, npages, flags);
    spinlock_unlock(task->pgd_lock);

    return (void*) viraddr;
}

void* kmalloc(size_t sz)
{
    uint32_t flags;
    task_t* task = per_core(current_task);

    if (task->ustack)
        flags = MAP_USER_SPACE|MAP_HEAP;
    else
        flags = MAP_KERNEL_SPACE|MAP_HEAP;

    return mem_allocation(sz, flags);
}

void* create_stack(task_t* task, size_t sz)
{
    size_t addr, viraddr;
    uint32_t npages = sz / PAGE_SIZE;

    if (sz % PAGE_SIZE)
        npages++;

    addr = (size_t) task_get_pages(task, npages);
    if (BUILTIN_EXPECT(!addr, 0))
        return 0;

    /*
     * We need only user-level stacks here; kernel stacks are already
     * initialized as static arrays.
     */
    spinlock_lock(task->pgd_lock);
    viraddr = map_region(task, 0, addr, npages, MAP_USER_SPACE|MAP_STACK);
    spinlock_unlock(task->pgd_lock);

    /* fill the fresh stack with a recognizable canary pattern */
    memset((unsigned char*) viraddr, 0xCD, sz);

    return (void*) viraddr;
}

static void task_free(task_t* task, void* addr, size_t sz)
{
    uint32_t index, npages, i;
    size_t phyaddr;

    if (BUILTIN_EXPECT(!addr && !sz, 0))
        return;

    npages = sz / PAGE_SIZE;
    if (sz % PAGE_SIZE)
        npages++;

    /*
     * NOTE: the body of this loop was lost and is reconstructed from the
     * declared variables. The physical frames are resolved and released
     * before the region is unmapped, because virt_to_phys() cannot find
     * them afterwards.
     */
    spinlock_lock(&bitmap_lock);
    for(i=0; i<npages; i++) {
        phyaddr = virt_to_phys(task, (size_t) addr + i*PAGE_SIZE);
        if (!phyaddr)
            continue;
        index = phyaddr / PAGE_SIZE;
        page_clear_mark(index);
    }
    spinlock_unlock(&bitmap_lock);

    spinlock_lock(task->pgd_lock);
    vm_free(task, (size_t) addr, npages);
    spinlock_unlock(task->pgd_lock);

    atomic_int32_sub(&total_allocated_pages, npages);
    atomic_int32_add(&total_available_pages, npages);

    if (task->ustack)
        atomic_int32_sub(&(task->mem_usage), npages);
    else
        atomic_int32_sub(&total_kernel_pages, npages);
}

void kfree(void* addr, size_t sz)
{
    task_free(per_core(current_task), addr, sz);
}
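/*
 * Usage sketch (illustrative, not part of the original file): the allocator
 * stores no per-allocation size, so callers must pass the same size to
 * kfree() that they requested from kmalloc():
 *
 *     uint8_t* buf = kmalloc(2*PAGE_SIZE + 1);    // rounded up to 3 pages
 *     if (buf) {
 *         // ... use buf ...
 *         kfree(buf, 2*PAGE_SIZE + 1);            // same size as requested
 *     }
 */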