
/*
 * NOTE (imported from SVN commit log, r326):
 * - enable the paging support
 * - redesign of the APIC code
 * TODO:
 * - Currently, we are not able to start user-level applications.
 * - The RTL8139 driver no longer works. Perhaps a bug in the output function.
 * - The APIC code doesn't work on all systems. Therefore, the code is
 *   currently disabled.
 */
/*
|
|
* Copyright 2010 Stefan Lankes, Chair for Operating Systems,
|
|
* RWTH Aachen University
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*
|
|
* This file is part of MetalSVM.
|
|
*/
|
|
|
|
#include <metalsvm/stddef.h>
|
|
#include <metalsvm/stdio.h>
|
|
#include <metalsvm/stdlib.h>
|
|
#include <metalsvm/mmu.h>
|
|
#include <metalsvm/string.h>
|
|
#include <metalsvm/page.h>
|
|
#include <metalsvm/spinlock.h>
|
|
#include <metalsvm/processor.h>
|
|
#include <metalsvm/tasks.h>
|
|
#include <metalsvm/errno.h>
|
|
#include <asm/irq.h>
|
|
#include <asm/multiboot.h>
|
|
|
|
/*
 * Virtual memory layout of the standard configuration
 * (1 GiB kernel space)
 *
 * 0x00000000 - 0x000FFFFF: reserved for IO devices
 * 0x00100000 - 0x0DEADFFF: kernel (size depends on the configuration)
 * 0x0DEAE000 - 0x3FFFEFFF: kernel heap
 * 0x3FFFF000 - 0x3FFFFFFF: page tables are mapped in this region
 *                          (the first 256 entries belong to kernel space)
 */
|
|
|
|
/*
 * Note that linker symbols are not variables, they have no memory allocated for
 * maintaining a value, rather their address is their value.
 */
extern const void kernel_start;
extern const void kernel_end;

// boot task's page directory and page directory lock
static page_dir_t boot_pgd = {{[0 ... 1023] = 0}};
static spinlock_t boot_pgd_lock = SPINLOCK_INIT;
// set to 1 by arch_paging_init() once CR0.PG has been turned on;
// checked by all functions that rely on the self-mapped page tables
static int paging_enabled = 0;
|
|
|
|
/** Attach the boot (kernel) page directory to the given task.
 *
 * @param task task that shall use the kernel address space
 * @return 0 on success, -EINVAL if task is NULL
 */
int get_kernel_pgd(task_t* task)
{
	if (BUILTIN_EXPECT(!task, 0))
		return -EINVAL;

	// the boot page directory and its lock are shared by all kernel tasks
	task->pgd_lock = &boot_pgd_lock;
	task->pgd = &boot_pgd;

	return 0;
}
|
|
|
|
/** Translate a virtual address into the physical address it is mapped to.
 *
 * Walks the task's page directory via the self-mapped page tables
 * (see file header), so it only works after paging has been enabled.
 *
 * @param task    task whose page directory is used for the lookup
 * @param viraddr virtual address to translate
 * @return the physical address, or 0 if viraddr is not mapped
 */
size_t virt_to_phys(task_t* task, size_t viraddr)
{
	uint32_t index1, index2;
	page_table_t* pgt;
	size_t ret = 0;

	if (BUILTIN_EXPECT(!task || !task->pgd || !paging_enabled, 0))
		return 0;

	index1 = viraddr >> 22;           // page directory index
	index2 = (viraddr >> 12) & 0x3FF; // page table index

	// no page table exists for this 4 MiB region => address is unmapped
	if (!(task->pgd->entries[index1] & 0xFFFFF000))
		goto out;

	/*
	 * FIX: was "index1*4", which the 0xFFFFF000 mask reduced to the page
	 * table of directory entry 0 for every address; the table of entry
	 * index1 is mapped at container base + index1*PAGE_SIZE (see file header)
	 */
	pgt = (page_table_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE) & 0xFFFFF000);
	if (!pgt || !(pgt->entries[index2]))
		goto out;

	ret = pgt->entries[index2] & 0xFFFFF000; // determine page frame
	ret = ret | (viraddr & 0xFFF);           // add page offset
out:
	//kprintf("vir %p to phy %p\n", viraddr, ret);

	return ret;
}
|
|
|
|
/** Map npages physical pages, starting at phyaddr, to the virtual
 *  address viraddr (identity mapping required before paging is enabled).
 *
 * @param task    task whose page directory is extended
 * @param viraddr desired virtual start address; 0 => let vm_alloc() pick one
 * @param phyaddr physical start address (must not be 0)
 * @param npages  number of pages to map
 * @param flags   MAP_KERNEL_SPACE is currently mandatory; MAP_NO_CACHE
 *                marks the pages cache-disabled (e.g. for MMIO)
 * @return the virtual start address of the mapping, 0 on failure
 */
size_t map_region(task_t* task, size_t viraddr, size_t phyaddr, uint32_t npages, uint32_t flags)
{
	page_table_t* pgt;
	size_t index, i;
	size_t ret;

	if (BUILTIN_EXPECT(!task || !task->pgd || !phyaddr, 0))
		return 0;

	// before paging is up we run on the identity mapping only
	if (BUILTIN_EXPECT(!paging_enabled && (viraddr != phyaddr), 0))
		return 0;

	// only kernel-space mappings are supported so far
	if (!(flags & MAP_KERNEL_SPACE))
		return 0;

	if (!viraddr) {
		viraddr = vm_alloc(task, npages, flags);
		if (BUILTIN_EXPECT(!viraddr, 0)) {
			kputs("map_address: found no valid virtual address\n");
			return 0;
		}
	}

	/*
	 * FIX: the error paths below released task->pgd_lock without anybody
	 * ever having acquired it => take the lock while the tables are modified
	 */
	spinlock_lock(task->pgd_lock);

	ret = viraddr;
	//kprintf("map %d pages from %p to %p\n", npages, phyaddr, ret);
	for(i=0; i<npages; i++, viraddr+=PAGE_SIZE, phyaddr+=PAGE_SIZE) {
		index = viraddr >> 22;	// page directory index

		if (!(task->pgd->entries[index])) {
			page_table_t* pgt_container;

			// no page table for this 4 MiB region yet => create one
			pgt = (page_table_t*) get_pages(1);
			if (BUILTIN_EXPECT(!pgt, 0)) {
				spinlock_unlock(task->pgd_lock);
				kputs("map_address: out of memory\n");
				return 0;
			}

			// set the new page table into the directory
			task->pgd->entries[index] = (uint32_t)pgt|KERN_TABLE;

			// if paging is already enabled, we need to use the virtual address
			if (paging_enabled)
				// we already know the virtual address of the "page table container"
				// (see file header)
				pgt_container = (page_table_t*) ((KERNEL_SPACE - PAGE_SIZE) & 0xFFFFF000);
			else
				pgt_container = (page_table_t*) (task->pgd->entries[(KERNEL_SPACE - PAGE_SIZE) >> 22] & 0xFFFFF000);

			if (BUILTIN_EXPECT(!pgt_container, 0)) {
				spinlock_unlock(task->pgd_lock);
				kputs("map_address: internal error\n");
				return 0;
			}

			// map the new table into the address space of the kernel space
			pgt_container->entries[index] = ((size_t) pgt)|KERN_PAGE;

			// clear the page table
			if (paging_enabled)
				// FIX: was "index*4"; each table is visible in the
				// self-mapping at container base + index*PAGE_SIZE
				memset((void*) (KERNEL_SPACE - 1024*PAGE_SIZE + index*PAGE_SIZE), 0, PAGE_SIZE);
			else
				memset(pgt, 0, PAGE_SIZE);
		} else pgt = (page_table_t*) (task->pgd->entries[index] & 0xFFFFF000);

		/* convert physical address to virtual */
		if (paging_enabled)
			// FIX: was "index*4", which the mask reduced to table 0
			pgt = (page_table_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index*PAGE_SIZE) & 0xFFFFF000);

		index = (viraddr >> 12) & 0x3FF;	// page table index
		if (BUILTIN_EXPECT(pgt->entries[index], 0)) {
			spinlock_unlock(task->pgd_lock);
			// FIX: format string had no matching argument (UB) + typo
			kprintf("0x%x is already mapped\n", viraddr);
			return 0;
		}

		pgt->entries[index] = KERN_PAGE|(phyaddr & 0xFFFFF000);
		if (flags & MAP_NO_CACHE)
			pgt->entries[index] |= PG_PCD;

		tlb_flush_one_page(viraddr);
	}

	spinlock_unlock(task->pgd_lock);

	return ret;
}
|
|
|
|
/*
|
|
* Use the first fit algorithm to find a valid address range
|
|
*
|
|
* TODO: O(n) => bad performance, we need a better approach
|
|
*/
|
|
size_t vm_alloc(task_t* task, uint32_t npages, uint32_t flags)
|
|
{
|
|
uint32_t index1, index2, j;
|
|
size_t viraddr, i;
|
|
size_t start, end;
|
|
page_table_t* pgt;
|
|
|
|
if (BUILTIN_EXPECT(!task || !task->pgd || !paging_enabled, 0))
|
|
return 0;
|
|
|
|
if (flags & MAP_KERNEL_SPACE) {
|
|
start = (((size_t) &kernel_end) + PAGE_SIZE) & 0xFFFFF000;
|
|
end = (KERNEL_SPACE - 2*PAGE_SIZE) & 0xFFFFF000; // we need 1 PAGE for our PGTs
|
|
} else {
|
|
start = KERNEL_SPACE & 0xFFFFF000;
|
|
end = 0xFFFFF000;
|
|
}
|
|
|
|
if (BUILTIN_EXPECT(!npages, 0))
|
|
return 0;
|
|
|
|
viraddr = i = start;
|
|
j = 0;
|
|
do {
|
|
index1 = i >> 22;
|
|
index2 = (i >> 12) & 0x3FF;
|
|
|
|
pgt = (page_table_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index1*4) & 0xFFFFF000);
|
|
if (!pgt || !(pgt->entries[index2])) {
|
|
i+=PAGE_SIZE;
|
|
j++;
|
|
} else {
|
|
// restart search
|
|
j = 0;
|
|
viraddr = i + PAGE_SIZE;
|
|
i = i + PAGE_SIZE;
|
|
}
|
|
} while((j < npages) && (i<=end));
|
|
|
|
if ((j >= npages) && (viraddr < end))
|
|
return viraddr;
|
|
|
|
return 0;
|
|
}
|
|
|
|
/** Unmap npages pages of virtual address space, starting at viraddr.
 *
 * Only the page table entries are cleared; the backing physical frames
 * are NOT released here.
 *
 * @param task    task whose mapping is removed
 * @param viraddr virtual start address
 * @param npages  number of pages to unmap
 * @return 0 on success, -EINVAL on invalid arguments
 */
int vm_free(task_t* task, size_t viraddr, uint32_t npages)
{
	uint32_t i;
	uint32_t index1, index2;
	page_table_t* pgt;

	if (BUILTIN_EXPECT(!task || !task->pgd || !paging_enabled, 0))
		return -EINVAL;

	for(i=0; i<npages; i++, viraddr+=PAGE_SIZE)
	{
		index1 = viraddr >> 22;
		index2 = (viraddr >> 12) & 0x3FF;

		// FIX: skip regions without a page table instead of touching
		// the unmapped self-mapping, which would fault
		if (!(task->pgd->entries[index1] & 0xFFFFF000))
			continue;

		// FIX: was "index1*4" (masked to table 0); table index1 is
		// mapped at container base + index1*PAGE_SIZE (see file header)
		pgt = (page_table_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE) & 0xFFFFF000);
		pgt->entries[index2] = 0;

		// FIX: invalidate the now-stale TLB entry (map_region does the
		// same after establishing a mapping)
		tlb_flush_one_page(viraddr);
	}

	return 0;
}
|
|
|
|
/** Dump the page directory and page table entries that translate viraddr.
 *
 * @param viraddr virtual address to inspect (must not be 0)
 * @return 0 on success, -EINVAL if viraddr is 0
 */
int print_paging_tree(size_t viraddr)
{
	uint32_t index1, index2;
	page_dir_t* pgd = NULL;
	page_table_t* pgt = NULL;

	if (BUILTIN_EXPECT(!viraddr, 0))
		return -EINVAL;

	index1 = viraddr >> 22;           // page directory index
	index2 = (viraddr >> 12) & 0x3FF; // page table index

	kprintf("Paging dump of address 0x%x\n", viraddr);
	pgd = per_core(current_task)->pgd;
	kprintf("\tPage directory entry %u: ", index1);
	if (pgd) {
		kprintf("0x%0x\n", pgd->entries[index1]);
		pgt = (page_table_t*) (pgd->entries[index1] & 0xFFFFF000);
	} else
		kputs("invalid page directory\n");

	/* convert physical address to virtual */
	if (paging_enabled && pgt)
		// FIX: was "index1*4", which yields a misaligned pointer into
		// the container table itself; table index1 is mapped at
		// container base + index1*PAGE_SIZE (see file header)
		pgt = (page_table_t*) (KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE);

	kprintf("\tPage table entry %u: ", index2);
	if (pgt)
		kprintf("0x%x\n", pgt->entries[index2]);
	else
		kputs("invalid page table\n");

	return 0;
}
|
|
|
|
/*
 * Handler for IRQ/exception 14 (page fault): print the id of the
 * faulting task, the interrupt number and the register state, then
 * abort the task — there is no demand paging yet, so every fault is fatal.
 *
 * NOTE(review): the faulting linear address (CR2) and the page fault
 * error code are not printed; s->int_no is the vector number, not the
 * faulting address — consider extending the dump.
 */
static void pagefault_handler(struct state *s)
{
	kprintf("PAGE FAULT: Task %u got page fault at irq %u\n", per_core(current_task)->id, s->int_no);
	kprintf("Register state: eax = 0x%x, ebx = 0x%x, ecx = 0x%x, edx = 0x%x, edi = 0x%x, esi = 0x%x, ebp = 0x%x, esp = 0x%x\n",
		s->eax, s->ebx, s->ecx, s->edx, s->edi, s->esi, s->ebp, s->esp);

	// unrecoverable: terminate
	abort();
}
|
|
|
|
/** Build the initial page tables for the boot task, install the page
 *  fault handler and finally turn on paging (CR0.PG).
 *
 * The kernel, the VGA buffer, the multiboot structures, reserved memory
 * regions and boot modules are all identity-mapped so that execution can
 * continue seamlessly once paging is active.
 *
 * @return 0 on success, -ENOMEM if the container page table cannot be allocated
 */
int arch_paging_init(void)
{
	uint32_t i, npages, index1, index2;
	page_table_t* pgt;
	size_t viraddr;

	// uninstall default handler and install our own
	irq_uninstall_handler(14);
	irq_install_handler(14, pagefault_handler);

	// Create a page table to reference to the other page tables
	// (the "page table container", see the layout in the file header)
	pgt = (page_table_t*) get_pages(1);
	if (!pgt) {
		kputs("arch_paging_init: Not enough memory!\n");
		return -ENOMEM;
	}
	memset(pgt, 0, PAGE_SIZE);

	// map this table at the end of the kernel space
	viraddr = KERNEL_SPACE - PAGE_SIZE;
	index1 = viraddr >> 22;
	index2 = (viraddr >> 12) & 0x3FF;

	// now, we create a self reference
	// NOTE(review): the directory entry uses USER_TABLE while the table
	// entry uses KERN_PAGE — confirm the access bits are intended
	per_core(current_task)->pgd->entries[index1] = (((size_t) pgt) & 0xFFFFF000)|USER_TABLE;
	pgt->entries[index2] = ((size_t) pgt & 0xFFFFF000)|KERN_PAGE;

	/*
	 * Set the page table and page directory entries for the kernel. We map the kernel's physical address
	 * to the same virtual address.
	 */
	npages = ((size_t) &kernel_end - (size_t) &kernel_start) / PAGE_SIZE;
	if ((size_t)&kernel_end % PAGE_SIZE)
		npages++;	// round up to cover a partial trailing page
	map_region(per_core(current_task), (size_t)&kernel_start, (size_t)&kernel_start, npages, MAP_KERNEL_SPACE);

#ifdef CONFIG_VGA
	// map the video memory into the kernel space (uncached: MMIO)
	map_region(per_core(current_task), VIDEO_MEM_ADDR, VIDEO_MEM_ADDR, 1, MAP_KERNEL_SPACE|MAP_NO_CACHE);
#endif

#ifdef CONFIG_MULTIBOOT
	/*
	 * of course, mb_info has to map into the kernel space
	 */
	if (mb_info)
		map_region(per_core(current_task), (size_t) mb_info, (size_t) mb_info, 1, MAP_KERNEL_SPACE);

	/*
	 * Map reserved memory regions into the kernel space
	 * (bit 6 of mb_info->flags signals a valid memory map)
	 */
	if (mb_info && (mb_info->flags & (1 << 6))) {
		multiboot_memory_map_t* mmap = (multiboot_memory_map_t*) mb_info->mmap_addr;
		multiboot_memory_map_t* mmap_end = (void*) ((size_t) mb_info->mmap_addr + mb_info->mmap_length);

		while (mmap < mmap_end) {
			if (mmap->type != MULTIBOOT_MEMORY_AVAILABLE) {
				npages = mmap->len / PAGE_SIZE;
				if ((mmap->addr+mmap->len) % PAGE_SIZE)
					npages++;
				map_region(per_core(current_task), mmap->addr, mmap->addr, npages, MAP_KERNEL_SPACE|MAP_NO_CACHE);
			}
			// NOTE(review): this advances by sizeof(*mmap); the multiboot
			// spec defines variable-sized mmap entries (leading size
			// field) — confirm multiboot_memory_map_t matches that layout
			mmap++;
		}
	}

	/*
	 * Modules like the init ram disk are already loaded.
	 * Therefore, we map these modules into the kernel space.
	 * (bit 3 of mb_info->flags signals a valid module list)
	 */
	if (mb_info && (mb_info->flags & (1 << 3))) {
		multiboot_module_t* mmodule = (multiboot_module_t*) mb_info->mods_addr;

		for(i=0; i<mb_info->mods_count; i++, mmodule++) {
			// map physical address to the same virtual address
			npages = (mmodule->mod_end - mmodule->mod_start) / PAGE_SIZE;
			if (mmodule->mod_end % PAGE_SIZE)
				npages++;
			map_region(per_core(current_task), (size_t) (mmodule->mod_start), (size_t) (mmodule->mod_start), npages, MAP_KERNEL_SPACE);
		}
	}
#endif

	/* enable paging: load CR3 with the boot page directory and set CR0.PG (bit 31) */
	write_cr3((uint32_t) &boot_pgd);
	i = read_cr0();
	i = i | (1 << 31);
	write_cr0(i);
	paging_enabled = 1;

	/*
	 * we turned on paging
	 * => now, we are able to register our task for Task State Switching
	 */
	register_task(per_core(current_task));

	return 0;
}
|
|
|