2010-12-10 06:16:58 +00:00
|
|
|
/*
|
2014-02-07 11:01:10 +01:00
|
|
|
* Copyright 2014 Steffen Vogel, Chair for Operating Systems,
|
2010-12-10 06:16:58 +00:00
|
|
|
* RWTH Aachen University
|
|
|
|
*
|
|
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
* you may not use this file except in compliance with the License.
|
|
|
|
* You may obtain a copy of the License at
|
|
|
|
*
|
|
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
*
|
|
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
* See the License for the specific language governing permissions and
|
|
|
|
* limitations under the License.
|
|
|
|
*
|
|
|
|
* This file is part of MetalSVM.
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <metalsvm/stddef.h>
|
|
|
|
#include <metalsvm/stdio.h>
|
|
|
|
#include <metalsvm/stdlib.h>
|
2014-01-09 16:20:18 +01:00
|
|
|
#include <metalsvm/memory.h>
|
2011-02-24 09:36:05 +01:00
|
|
|
#include <metalsvm/vma.h>
|
2010-12-10 06:16:58 +00:00
|
|
|
#include <metalsvm/string.h>
|
|
|
|
#include <metalsvm/spinlock.h>
|
|
|
|
#include <metalsvm/processor.h>
|
|
|
|
#include <metalsvm/tasks.h>
|
|
|
|
#include <metalsvm/errno.h>
|
2014-02-07 11:01:10 +01:00
|
|
|
#include <metalsvm/page.h>
|
|
|
|
#include <asm/page_helpers.h>
|
2010-12-10 06:16:58 +00:00
|
|
|
#include <asm/irq.h>
|
|
|
|
#include <asm/multiboot.h>
|
2011-07-18 15:51:26 +02:00
|
|
|
#include <asm/apic.h>
|
2010-12-10 06:16:58 +00:00
|
|
|
|
2014-05-14 15:11:02 +02:00
|
|
|
/**
|
|
|
|
* @author Steffen Vogel <steffen.vogel@rwth-aachen.de>
|
|
|
|
*/
|
|
|
|
|
2010-12-10 06:16:58 +00:00
|
|
|
/*
|
|
|
|
* Virtual Memory Layout of the standard configuration
|
|
|
|
* (1 GB kernel space)
|
|
|
|
*
|
2014-02-18 13:08:22 +01:00
|
|
|
* 0x0000000000000000 - 0x00000000000FFFFF: reserved for IO devices (16MB)
|
|
|
|
* 0x0000000000100000 - 0x00000000008C2000: Kernel (~8MB)
|
|
|
|
* 0x00000000008c3000 - 0x0000000000973000: Init Ramdisk (~2MB)
|
2014-01-09 16:52:03 +01:00
|
|
|
*
|
2014-02-18 13:08:22 +01:00
|
|
|
* 0x0001000000000000 - 0xffff000000000000: Memory hole (48 bit VAS limitation)
|
2013-12-03 16:34:34 +01:00
|
|
|
*
|
2014-02-18 13:08:22 +01:00
|
|
|
* 0xFFFFFE8000000000 - 0xFFFFFEFFFFFFFFFF: Page map dest for copy_page_map() (512GB)
|
|
|
|
* 0xFFFFFF0000000000 - 0xFFFFFF7FFFFFFFFF: Page map source for copy_page_map() (512GB)
|
|
|
|
* 0xFFFFFF8000000000 - 0xFFFFFFFFFFFFFFFF: Self-referenced page maps of the current task (512GB)
|
2013-12-03 16:34:34 +01:00
|
|
|
*/
|
|
|
|
|
2014-02-18 13:08:22 +01:00
|
|
|
/// Boot task's page map (setup by entryXX.asm)
|
|
|
|
extern page_entry_t boot_pml4[PAGE_MAP_ENTRIES];
|
2013-12-03 16:34:34 +01:00
|
|
|
|
2014-02-18 13:08:22 +01:00
|
|
|
/// Kernel space page map lock
|
|
|
|
static spinlock_t kslock = SPINLOCK_INIT;
|
2014-01-09 13:44:20 +01:00
|
|
|
|
2014-02-18 13:08:22 +01:00
|
|
|
/// Mapping of self referenced page map (at the end of the VAS)
|
2014-05-14 18:56:15 +02:00
|
|
|
// TODO: find a more generic initialization
|
|
|
|
#ifdef CONFIG_X86_32
|
|
|
|
static page_entry_t* const current_map = (page_entry_t*) (1 * PAGE_MAP_PGD);
|
|
|
|
static page_entry_t* const src_map = (page_entry_t*) (2 * PAGE_MAP_PGD);
|
|
|
|
static page_entry_t* const dest_map = (page_entry_t*) (3 * PAGE_MAP_PGD);
|
|
|
|
#elif defined(CONFIG_X86_64)
|
2014-05-14 15:13:11 +02:00
|
|
|
static page_entry_t* const current_map = (page_entry_t*) (1 * PAGE_MAP_PML4);
|
|
|
|
static page_entry_t* const src_map = (page_entry_t*) (2 * PAGE_MAP_PML4);
|
|
|
|
static page_entry_t* const dest_map = (page_entry_t*) (3 * PAGE_MAP_PML4);
|
2014-05-14 18:56:15 +02:00
|
|
|
#endif
|
|
|
|
|
|
|
|
#ifdef CONFIG_X86_32
|
|
|
|
static page_entry_t boot_pgd[PAGE_MAP_ENTRIES];
|
|
|
|
#endif
|
2013-12-03 16:34:34 +01:00
|
|
|
|
2014-02-18 13:08:22 +01:00
|
|
|
/** @brief Return the page map that entryXX.asm set up for the boot task.
 *
 * @return Pointer to the architecture-specific boot page map
 *         (PGD on x86_32, PML4 on x86_64).
 */
page_entry_t* get_boot_page_map(void)
{
	page_entry_t* map;

#ifdef CONFIG_X86_32
	map = boot_pgd;
#elif defined(CONFIG_X86_64)
	map = boot_pml4;
#endif

	return map;
}
|
2013-12-03 16:37:53 +01:00
|
|
|
|
2014-02-18 13:08:22 +01:00
|
|
|
/** @brief Dump the current task's virtual memory mappings to the kernel log.
 *
 * Walks the self-referenced page map (current_map) and prints contiguous
 * runs of pages that share the same (masked) flags, one line per run.
 * Uses GCC nested functions; print()/traverse() close over the locals
 * flags/start/end below.
 *
 * @param mask Only flag bits set in this mask are compared/printed;
 *             PG_PRESENT is always added.
 */
void page_dump(size_t mask)
{
	task_t* task = per_core(current_task);

	mask |= PG_PRESENT; // only present pages can be dumped at all

	// run-length state shared with the nested functions below:
	size_t flags = 0;   // flags of the run currently being accumulated (0 = no open run)
	size_t start = 0;   // virtual start address of the current run
	size_t end;

	// print one "start-end size flags" line for a finished run
	void print(size_t start, size_t end, size_t flags) {
		size_t size = end - start;

		kprintf("%#018lx-%#018lx %#14x %c%c%c%c%c%c\n", start, end, size,
			(mask & flags & PG_XD) ? '-' : 'x',
			(mask & flags & PG_GLOBAL) ? 'g' : '-',
			(mask & flags & PG_DIRTY) ? 'd' : '-',
			(mask & flags & PG_ACCESSED) ? 'a' : '-',
			(mask & flags & PG_USER) ? 'u' : '-',
			(mask & flags & PG_RW) ? 'w' : '-'
		);
	}

	// recursive walk over one table of PAGE_MAP_ENTRIES entries;
	// level counts down to 0 (the page table level)
	void traverse(int level, page_entry_t* entry) {
		page_entry_t* stop = entry + PAGE_MAP_ENTRIES;
		for (; entry != stop; entry++) {
			if (*entry & PG_PRESENT) {
				if (level && !(*entry & PG_PSE)) // do "pre-order" traversal
					// TODO: handle "inheritance" of page table flags (see get_page_flags())
					traverse(level-1, get_child_entry(entry));
				else {
					if (!flags) {
						// open a new run at this page
						flags = *entry & ~PAGE_MASK & mask;
						start = entry_to_virt(entry, level);
					}
					else if (flags != (*entry & ~PAGE_MASK & mask)) {
						// flags changed: close the old run and start a new one
						end = entry_to_virt(entry, level);
						print(start, end, flags);

						flags = *entry & ~PAGE_MASK & mask;
						start = end;
					}
				}
			}
			else if (flags) {
				// hole in the mapping terminates the current run
				end = entry_to_virt(entry, level);
				print(start, end, flags);
				flags = 0;
			}
		}
	}

	// lock tables
	spinlock_lock(&kslock);
	spinlock_irqsave_lock(&task->page_lock);

	kprintf("%-18s-%18s %14s %-6s\n", "start", "end", "size", "flags"); // header

	traverse(PAGE_MAP_LEVELS-1, current_map);

	if (flags) // workaround to print last mapping
		print(start, 0L, flags);

	// unlock tables
	spinlock_irqsave_unlock(&task->page_lock);
	spinlock_unlock(&kslock);
}
|
|
|
|
|
2014-02-18 13:08:22 +01:00
|
|
|
void page_stats(int reset)
|
2013-12-03 16:37:53 +01:00
|
|
|
{
|
2014-01-09 12:49:04 +01:00
|
|
|
task_t* task = per_core(current_task);
|
|
|
|
|
|
|
|
int i, stats[13] = { 0 };
|
|
|
|
const char* labels[] = { [0] = "present", "writable", "user accessable", "write through", "cache disabled", // IA-32 "legacy" bits
|
|
|
|
"accessed", "dirty", "huge pages", "global", "svm", "svm lazy", "svm init",
|
|
|
|
[12] = "exec disabled" // IA-32e / PAE bits
|
|
|
|
};
|
2013-12-03 16:37:53 +01:00
|
|
|
|
2014-02-18 13:08:22 +01:00
|
|
|
void traverse(int level, page_entry_t* entry) {
|
|
|
|
page_entry_t* stop = entry + PAGE_MAP_ENTRIES;
|
|
|
|
for (; entry != stop; entry++) {
|
|
|
|
if (*entry & PG_PRESENT) {
|
|
|
|
if (level && !(*entry & PG_PSE))
|
|
|
|
traverse(level-1, get_child_entry(entry));
|
|
|
|
else {
|
|
|
|
// increment stat counters
|
|
|
|
int i;
|
|
|
|
for (i=0; i<12; i++) { // IA-32 "legacy" bits
|
|
|
|
if (*entry & (1 << i))
|
|
|
|
stats[i]++;
|
|
|
|
}
|
2014-05-14 15:13:11 +02:00
|
|
|
#ifdef CONFIG_X86_64
|
2014-02-18 13:08:22 +01:00
|
|
|
for (i=0; i<1; i++) { // IA-32e / PAE bits
|
|
|
|
if (*entry & (1 << (63-i)))
|
|
|
|
stats[i+PAGE_BITS]++;
|
|
|
|
}
|
2014-05-14 15:13:11 +02:00
|
|
|
#endif
|
2013-12-03 16:37:53 +01:00
|
|
|
|
2014-02-18 13:08:22 +01:00
|
|
|
if (reset) { // reset accessed and dirty bits
|
|
|
|
*entry &= ~(PG_ACCESSED|PG_DIRTY);
|
|
|
|
tlb_flush_one_page(entry_to_virt(entry, level)); // see IA32 Vol3 4.8
|
|
|
|
}
|
|
|
|
}
|
2013-12-03 16:37:53 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-01-09 12:49:04 +01:00
|
|
|
// lock tables
|
|
|
|
spinlock_lock(&kslock);
|
|
|
|
spinlock_irqsave_lock(&task->page_lock);
|
|
|
|
|
2014-02-18 13:08:22 +01:00
|
|
|
traverse(PAGE_MAP_LEVELS-1, current_map);
|
2013-12-03 16:37:53 +01:00
|
|
|
|
2014-01-09 12:49:04 +01:00
|
|
|
// unlock tables
|
|
|
|
spinlock_irqsave_unlock(&task->page_lock);
|
2014-02-18 13:08:22 +01:00
|
|
|
spinlock_unlock(&kslock);
|
2014-01-09 12:49:04 +01:00
|
|
|
|
2013-12-03 16:37:53 +01:00
|
|
|
kprintf("total pages:\n");
|
2014-01-09 12:49:04 +01:00
|
|
|
for (i=0; i<13; i++)
|
2013-12-03 16:37:53 +01:00
|
|
|
kprintf(" - %s:%*lu\n", labels[i], 25-strlen(labels[i]), stats[i]);
|
|
|
|
}
|
|
|
|
|
2014-01-09 13:44:20 +01:00
|
|
|
/** @brief Create the page map for a new task.
 *
 * Allocates a fresh top-level table and fills it by walking a source map:
 * user entries are deep-copied (new page frames via get_page()/copy_page()),
 * kernel entries are shared by shallow-copying the table entry. The source
 * and destination trees are reached through the temporary src_map/dest_map
 * windows installed in the current map.
 *
 * @param new_task Task that receives the new page map.
 * @param copy     Non-zero: clone the current task's map (fork-style);
 *                 zero: start from the boot page map.
 * @return 0 on success, -ENOMEM if a page frame could not be allocated.
 */
int copy_page_map(task_t* new_task, int copy)
{
	task_t* cur_task = per_core(current_task);

	// recursive copy of one table level (GCC nested function);
	// returns 0 or a negative errno
	int traverse(int level, page_entry_t* src, page_entry_t* dest) {
		page_entry_t* stop = src + PAGE_MAP_ENTRIES;
		for (; src != stop; src++, dest++) {
			if (*src & PG_PRESENT) {
				if (*src & PG_USER) { // deep copy page frame
					size_t phyaddr = get_page();
					if (BUILTIN_EXPECT(!phyaddr, 0))
						return -ENOMEM;

					atomic_int32_inc(&cur_task->user_usage);

					copy_page(phyaddr, *src & PAGE_MASK);
					*dest = phyaddr | (*src & ~PAGE_MASK);

					// do "pre-order" traversal
					if (level && !(*src & PG_PSE)) {
						int ret = traverse(level-1, get_child_entry(src),
								get_child_entry(dest));
						if (ret < 0)
							return ret;
					}
				}
				else // shallow copy kernel table
					*dest = *src;
			}
			else // table does not exists
				*dest = 0;
		}

		return 0;
	}

	page_entry_t* src_virt = (copy) ? cur_task->page_map : get_boot_page_map();
	page_entry_t* dest_virt = (page_entry_t*) palloc(PAGE_SIZE, MAP_KERNEL_SPACE);
	if (BUILTIN_EXPECT(!dest_virt, 0))
		return -ENOMEM;

	size_t src_phys = virt_to_phys((size_t) src_virt);
	size_t dest_phys = virt_to_phys((size_t) dest_virt);

	// lock tables
	spinlock_lock(&kslock);
	spinlock_irqsave_lock(&cur_task->page_lock);

	kprintf("copy_page_map: copy = %u, src = %p (%p, %p), dest = %p (%p, %p)\n",
		copy, src_virt, src_phys, src_map, dest_virt, dest_phys, dest_map); // TODO: remove

	// temporary map src and dest tables
	current_map[PAGE_MAP_ENTRIES-2] = (src_phys & PAGE_MASK) | (PG_TABLE & ~PG_RW); // source is read-only!
	current_map[PAGE_MAP_ENTRIES-3] = (dest_phys & PAGE_MASK) | PG_TABLE;

	//tlb_flush(); // ouch :(

	int ret = traverse(PAGE_MAP_LEVELS-1, src_map, dest_map);

	// setup self reference for new table
	dest_map[PAGE_MAP_ENTRIES-1] = dest_phys | PG_TABLE;

	// unmap temporary tables (the traversal shallow-copied the temporary
	// window entries into the new map, too — clear them there as well)
	current_map[PAGE_MAP_ENTRIES-2] = 0;
	current_map[PAGE_MAP_ENTRIES-3] = 0;
	dest_map[PAGE_MAP_ENTRIES-2] = 0;
	dest_map[PAGE_MAP_ENTRIES-3] = 0;
	tlb_flush(); // ouch :(

	// unlock tables
	spinlock_irqsave_unlock(&cur_task->page_lock);
	spinlock_unlock(&kslock);

	new_task->page_map = dest_virt;

	return ret;
}
|
|
|
|
|
2013-10-11 16:21:53 +02:00
|
|
|
int drop_page_map(void)
|
2011-02-24 18:32:58 +01:00
|
|
|
{
|
2011-03-04 11:38:40 +01:00
|
|
|
task_t* task = per_core(current_task);
|
2011-02-24 18:32:58 +01:00
|
|
|
|
2014-02-18 13:08:22 +01:00
|
|
|
void traverse(int level, page_entry_t* entry) {
|
|
|
|
page_entry_t* stop = entry + PAGE_MAP_ENTRIES;
|
|
|
|
for (; entry != stop; entry++) {
|
|
|
|
if (*entry & PG_PRESENT) {
|
|
|
|
// do "post-order" traversal
|
|
|
|
if (level && !(*entry & PG_PSE))
|
|
|
|
traverse(level-1, get_child_entry(entry));
|
|
|
|
|
|
|
|
if (*entry & PG_USER) {
|
|
|
|
kprintf("drop_page_map: entry = %p. level = %u\n", entry, level);
|
|
|
|
if (put_page(*entry & PAGE_MASK))
|
|
|
|
atomic_int32_dec(&task->user_usage);
|
|
|
|
}
|
|
|
|
}
|
2011-03-04 11:38:40 +01:00
|
|
|
}
|
2014-01-09 13:44:20 +01:00
|
|
|
}
|
2011-02-24 18:32:58 +01:00
|
|
|
|
2014-01-09 13:44:20 +01:00
|
|
|
kprintf("drop_page_map: task = %u\n", task->id); // TODO: remove
|
2011-02-24 18:32:58 +01:00
|
|
|
|
2014-01-09 13:44:20 +01:00
|
|
|
// check assertions
|
|
|
|
if (BUILTIN_EXPECT(task->page_map == get_boot_page_map(), 0))
|
|
|
|
return -EINVAL;
|
|
|
|
if (BUILTIN_EXPECT(!task || !task->page_map, 0))
|
|
|
|
return -EINVAL;
|
2010-12-15 12:08:37 +00:00
|
|
|
|
2014-01-09 13:44:20 +01:00
|
|
|
// lock tables
|
|
|
|
spinlock_irqsave_lock(&task->page_lock);
|
2010-12-10 06:16:58 +00:00
|
|
|
|
2014-02-18 13:08:22 +01:00
|
|
|
kprintf("user_usage: %u (task = %u)\n", atomic_int32_read(&task->user_usage), task->id);
|
|
|
|
|
|
|
|
traverse(PAGE_MAP_LEVELS-1, current_map);
|
2010-12-10 06:16:58 +00:00
|
|
|
|
2014-02-18 13:08:22 +01:00
|
|
|
put_page((size_t) task->page_map);
|
|
|
|
|
|
|
|
// we replace the page table
|
|
|
|
task->page_map = get_boot_page_map();
|
|
|
|
tlb_flush();
|
2010-12-10 06:16:58 +00:00
|
|
|
|
2014-01-09 13:44:20 +01:00
|
|
|
// unlock tables
|
|
|
|
spinlock_irqsave_unlock(&task->page_lock);
|
2010-12-10 06:16:58 +00:00
|
|
|
|
2014-01-09 13:44:20 +01:00
|
|
|
return 0;
|
|
|
|
}
|
2010-12-10 06:16:58 +00:00
|
|
|
|
2014-02-18 13:08:22 +01:00
|
|
|
/** @brief Change the access flags of an already mapped virtual region.
 *
 * Rewrites the flag bits of every page table entry in [viraddr,
 * viraddr + npages*PAGE_SIZE) and flushes the affected TLB entries.
 * On intermediate levels only PG_USER (and, on x86_64, PG_XD) are
 * adjusted so the leaf flags can take effect.
 *
 * @param viraddr Start of the region (page aligned).
 * @param npages  Number of pages.
 * @param flags   MAP_* flags; converted to PTE bits via page_bits().
 * @return Always 0 (also when the task assertion fails).
 */
int set_page_flags(size_t viraddr, uint32_t npages, int flags)
{
	task_t* task = per_core(current_task);
	// per-level [first, last) entry bounds limiting the traversal to the region
	page_entry_t* first[PAGE_MAP_LEVELS];
	page_entry_t* last[PAGE_MAP_LEVELS];

	size_t bits = page_bits(flags);
	size_t start = viraddr;
	size_t end = start + npages * PAGE_SIZE;

	// recursive walk bounded by first[]/last[] (GCC nested function)
	void traverse(int level, page_entry_t* entry) {
		page_entry_t* stop = entry + PAGE_MAP_ENTRIES;
		for (; entry != stop; entry++) {
			if (entry < last[level] && entry >= first[level]) {
				if ((*entry & PG_PRESENT) && !(*entry & PG_PSE)) {
					if (level) {
						if (flags & MAP_USER_SPACE)
							*entry |= PG_USER;

#ifdef CONFIG_X86_64
						if (flags & MAP_CODE)
							*entry &= ~PG_XD; // clear no-execute for code mappings
#endif

						// do "pre-order" traversal
						traverse(level-1, get_child_entry(entry));
					}
					else
						*entry = (*entry & PAGE_MASK) | bits;

					tlb_flush_one_page(entry_to_virt(entry, level));
				}
			}
		}
	}

	// check assertions
	if (BUILTIN_EXPECT(!task || !task->page_map, 0))
		return 0;

	// calc page tree boundaries
	int i;
	for (i=0; i<PAGE_MAP_LEVELS; i++) {
		first[i] = virt_to_entry(start, i);
		last[i] = virt_to_entry(end - 1, i) + 1; // exclusive
	}

	// lock tables (kslock guards the kernel part, page_lock the task part)
	if (start < KERNEL_SPACE)
		spinlock_lock(&kslock);
	if (end >= KERNEL_SPACE)
		spinlock_irqsave_lock(&task->page_lock);

	traverse(PAGE_MAP_LEVELS-1, current_map);

	// unlock tables
	if (start < KERNEL_SPACE)
		spinlock_unlock(&kslock);
	if (end >= KERNEL_SPACE)
		spinlock_irqsave_unlock(&task->page_lock);

	return 0;
}
|
|
|
|
|
2014-01-09 13:44:20 +01:00
|
|
|
/** @brief Map npages physical pages into the virtual address space.
 *
 * Creates missing page tables on demand, performs a private copy of
 * kernel-shared intermediate tables when a user mapping touches them,
 * and writes the leaf entries. If viraddr is 0 a region is allocated
 * from the VMA allocator first (deprecated behaviour, see TODO below).
 *
 * @param viraddr Target virtual address, or 0 to let vma_alloc() choose.
 * @param phyaddr First physical address to map (contiguous).
 * @param npages  Number of pages.
 * @param flags   MAP_* flags (MAP_USER_SPACE, MAP_REMAP, MAP_NO_CACHE, ...).
 * @return The virtual base address on success, 0 on failure.
 */
size_t map_region(size_t viraddr, size_t phyaddr, uint32_t npages, uint32_t flags)
{
	task_t* task = per_core(current_task);
	// per-level [first, last) entry bounds limiting the traversal to the region
	page_entry_t* first[PAGE_MAP_LEVELS];
	page_entry_t* last[PAGE_MAP_LEVELS];

	// TODO: this behaviour should be deprecated
	if (!viraddr) {
		int vma_flags = VMA_HEAP;
		if (flags & MAP_USER_SPACE)
			vma_flags |= VMA_USER;

		viraddr = vma_alloc(npages * PAGE_SIZE, vma_flags);
	}

	size_t bits = page_bits(flags);
	size_t start = viraddr;
	size_t end = start + npages * PAGE_SIZE;

	// recursive walk bounded by first[]/last[] (GCC nested function);
	// returns 0 or a negative errno
	int traverse(int level, page_entry_t* entry) {
		page_entry_t* stop = entry + PAGE_MAP_ENTRIES;
		for (; entry != stop; entry++) {
			if (entry < last[level] && entry >= first[level]) {
				if (level) { // PGD, PDPT, PML4..
					if (*entry & PG_PRESENT) {
						if ((flags & MAP_USER_SPACE) && !(*entry & PG_USER)) {
							/* We are altering entries which cover
							 * the kernel. So before changing them we need to
							 * make a private copy for the task */
							size_t phyaddr = get_page();
							if (BUILTIN_EXPECT(!phyaddr, 0))
								return -ENOMEM;

							atomic_int32_inc(&task->user_usage);

							copy_page(phyaddr, *entry & PAGE_MASK);
							*entry = phyaddr | (*entry & ~PAGE_MASK);
							*entry &= ~PG_GLOBAL;
							*entry |= PG_USER;

							/* We just need to flush the table itself.
							 * TLB entries for the kernel remain valid
							 * because we've not changed them. */
							// NOTE(review): level 0 is passed here although
							// entry belongs to this level — confirm intended
							tlb_flush_one_page(entry_to_virt(entry, 0));
						}
					}
					else {
						/* Theres no page map table available
						 * which covers the region. Therefore we will create a
						 * new table. */
						size_t phyaddr = get_page();
						if (BUILTIN_EXPECT(!phyaddr, 0))
							return -ENOMEM;

						if (flags & MAP_USER_SPACE)
							atomic_int32_inc(&task->user_usage);

						*entry = phyaddr | bits;

						memset(get_child_entry(entry), 0x00, PAGE_SIZE); // fill with zeros
					}

					// do "pre-order" traversal if no hugepage
					if (!(*entry & PG_PSE)) {
						int ret = traverse(level-1, get_child_entry(entry));
						if (ret < 0)
							return ret;
					}
				}
				else { // PGT
					// refuse to silently overwrite an existing mapping
					if ((*entry & PG_PRESENT) && !(flags & MAP_REMAP))
						return -EINVAL;

					*entry = phyaddr | bits;

					if (flags & MAP_USER_SPACE)
						atomic_int32_inc(&task->user_usage);

					if (flags & MAP_REMAP)
						tlb_flush_one_page(entry_to_virt(entry, level));

					phyaddr += PAGE_SIZE;
				}
			}
		}

		return 0;
	}

	kprintf("map_region: map %u pages from %#lx to %#lx with flags: %#x\n", npages, viraddr, phyaddr, flags); // TODO: remove

	// check assertions
	if (BUILTIN_EXPECT(!task || !task->page_map, 0))
		return 0;

	// calc page tree boundaries
	int i;
	for (i=0; i<PAGE_MAP_LEVELS; i++) {
		first[i] = virt_to_entry(start, i);
		last[i] = virt_to_entry(end - 1, i) + 1; // exclusive
	}

	// lock tables (kslock guards the kernel part, page_lock the task part)
	if (start < KERNEL_SPACE)
		spinlock_lock(&kslock);
	if (end >= KERNEL_SPACE)
		spinlock_irqsave_lock(&task->page_lock);

	int ret = traverse(PAGE_MAP_LEVELS-1, current_map);

	// unlock tables
	if (start < KERNEL_SPACE)
		spinlock_unlock(&kslock);
	if (end >= KERNEL_SPACE)
		spinlock_irqsave_unlock(&task->page_lock);

	return (ret) ? 0 : viraddr;
}
|
2010-12-10 06:16:58 +00:00
|
|
|
|
2011-03-04 22:44:53 +01:00
|
|
|
/** @brief Unmap npages pages starting at viraddr.
 *
 * Clears the PG_PRESENT bit of every leaf entry in the region, flushes
 * the TLB entries, releases user page frames, and deletes intermediate
 * tables that become empty.
 *
 * @param viraddr Start of the region (page aligned).
 * @param npages  Number of pages.
 * @return Always 0 (also when the task assertion fails).
 */
int unmap_region(size_t viraddr, uint32_t npages)
{
	task_t* task = per_core(current_task);
	// per-level [first, last) entry bounds limiting the traversal to the region
	page_entry_t* first[PAGE_MAP_LEVELS];
	page_entry_t* last[PAGE_MAP_LEVELS];

	size_t start = viraddr;
	size_t end = start + npages * PAGE_SIZE;

	kprintf("unmap_region: unmap %u pages from %#lx\n", npages, viraddr); // TODO: remove

	/** @return number of page table entries which a present */
	int traverse(int level, page_entry_t* entry) {
		int used = 0;
		page_entry_t* stop = entry + PAGE_MAP_ENTRIES;
		for (; entry != stop; entry++) {
			if (entry < last[level] && entry >= first[level]) {
				if (level) { // PGD, PDPT, PML4
					if ((*entry & PG_PRESENT) && !(*entry & PG_PSE)) {
						// do "post-order" traversal if table is present and no hugepage
						if (traverse(level-1, get_child_entry(entry)))
							used++;
						else { // child table is empty => delete it
							*entry &= ~PG_PRESENT;
							tlb_flush_one_page(entry_to_virt(entry, 0));

							if (*entry & PG_USER) {
								if (put_page(*entry & PAGE_MASK))
									atomic_int32_dec(&task->user_usage);
							}
						}
					}
				}
				else { // PGT
					*entry &= ~PG_PRESENT;

					tlb_flush_one_page(entry_to_virt(entry, level));

					if (*entry & PG_USER)
						atomic_int32_dec(&task->user_usage);
				}
			}
			else {
				// entries outside the region only count towards "used"
				if (*entry & PG_PRESENT)
					used++;
			}
		}

		return used;
	}

	// check assertions
	if (BUILTIN_EXPECT(!task || !task->page_map, 0))
		return 0;

	// calc page tree boundaries
	int i;
	for (i=0; i<PAGE_MAP_LEVELS; i++) {
		first[i] = virt_to_entry(start, i);
		last[i] = virt_to_entry(end - 1, i) + 1; // exclusive
	}

	// lock tables (kslock guards the kernel part, page_lock the task part)
	if (start < KERNEL_SPACE)
		spinlock_lock(&kslock);
	if (end >= KERNEL_SPACE)
		spinlock_irqsave_lock(&task->page_lock);

	traverse(PAGE_MAP_LEVELS-1, current_map);

	// unlock tables
	if (start < KERNEL_SPACE)
		spinlock_unlock(&kslock);
	// BUGFIX: must mirror the lock condition above (was "end > KERNEL_SPACE",
	// which leaked the irqsave lock when end == KERNEL_SPACE)
	if (end >= KERNEL_SPACE)
		spinlock_irqsave_unlock(&task->page_lock);

	return 0;
}
|
|
|
|
|
2010-12-10 06:16:58 +00:00
|
|
|
/** @brief Page fault (#PF, vector 14) handler.
 *
 * Implements on-demand mapping for the task's user heap: a fault inside
 * [heap->start, heap->end) allocates and maps a zeroed page and returns.
 * Any other fault falls through to the default handler, which dumps the
 * fault details plus the register state and aborts the task.
 *
 * @param s Trap frame pushed by the interrupt stub.
 */
static void pagefault_handler(struct state *s)
{
	task_t* task = per_core(current_task);
	size_t viraddr = read_cr2(); // faulting address

	// on demand userspace heap mapping
	if ((task->heap) && (viraddr >= task->heap->start) && (viraddr < task->heap->end)) {
		viraddr &= PAGE_MASK; // round down to page boundary

		size_t phyaddr = get_page();
		if (BUILTIN_EXPECT(!phyaddr, 0)) {
			kprintf("out of memory: task = %u\n", task->id);
			goto default_handler;
		}

		viraddr = map_region(viraddr, phyaddr, 1, MAP_USER_SPACE);
		if (BUILTIN_EXPECT(!viraddr, 0)) {
			// NOTE(review): viraddr is 0 here, so this message prints the
			// wrong source address — confirm and log the original address
			kprintf("map_region: could not map %#lx to %#lx, task = %u\n", viraddr, phyaddr, task->id);
			put_page(phyaddr);

			goto default_handler;
		}

		memset((void*) viraddr, 0x00, PAGE_SIZE); // fill with zeros

		return;
	}

default_handler:
	// decode the hardware error code bits (see IA-32 SDM Vol. 3, #PF)
	kprintf("Page Fault Exception (%d) at cs:ip = %#x:%#lx, core = %u, task = %u, addr = %#lx, error = %#x [ %s %s %s %s %s ]\n",
		s->int_no, s->cs,
#ifdef CONFIG_X86_32
		s->eip,
#elif defined(CONFIG_X86_64)
		s->rip,
#endif
		CORE_ID, task->id, viraddr, s->error,
		(s->error & 0x4) ? "user" : "supervisor",
		(s->error & 0x10) ? "instruction" : "data",
		(s->error & 0x2) ? "write" : ((s->error & 0x10) ? "fetch" : "read"),
		(s->error & 0x1) ? "protection" : "not present",
		(s->error & 0x8) ? "reserved bit" : "\b");

	// TODO: move this to something like print_registers()
#ifdef CONFIG_X86_32
	kprintf("Register state: eflags = %#lx, eax = %#lx, ebx = %#lx, ecx = %#lx, edx = %#lx, edi = %#lx, esi = %#lx, ebp = %#llx, esp = %#lx\n",
		s->eflags, s->eax, s->ebx, s->ecx, s->edx, s->edi, s->esi, s->ebp, s->esp);
#elif defined(CONFIG_X86_64)
	kprintf("Register state: rflags = %#lx, rax = %#lx, rbx = %#lx, rcx = %#lx, rdx = %#lx, rdi = %#lx, rsi = %#lx, rbp = %#llx, rsp = %#lx\n",
		s->rflags, s->rax, s->rbx, s->rcx, s->rdx, s->rdi, s->rsi, s->rbp, s->rsp);
#endif

	irq_enable();
	abort();
}
|
|
|
|
|
|
|
|
/** @brief Architecture-specific paging initialization.
 *
 * Installs the page fault handler, enables recursive paging on the boot
 * page map, maps multiboot modules (e.g. the init ramdisk), the SMP boot
 * page and the APIC registers, and registers the boot task.
 *
 * @return 0 on success, -ENOMEM if the SMP boot page could not be mapped.
 */
int arch_paging_init(void)
{
	uint32_t i, npages;

	// replace default pagefault handler
	irq_uninstall_handler(14);
	irq_install_handler(14, pagefault_handler);

	// setup recursive paging: last entry points back to the map itself
	page_entry_t* boot_map = get_boot_page_map();
	boot_map[PAGE_MAP_ENTRIES-1] = (size_t) boot_map | PG_TABLE;

	/*
	 * In longmode the kernel is already maped into the kernel space (see entry64.asm)
	 * this includes .data, .bss, .text, VGA, the multiboot & multiprocessing (APIC) structures
	 */

#if MAX_CORES > 1
	// reserve page for smp boot code
	if (!map_region(SMP_SETUP_ADDR, SMP_SETUP_ADDR, 1, MAP_NO_CACHE | MAP_REMAP)) {
		kputs("could not reserve page for smp boot code\n");
		return -ENOMEM;
	}
#endif

#ifdef CONFIG_MULTIBOOT
#if 0
	// map reserved memory regions into the kernel space
	if (mb_info && (mb_info->flags & MULTIBOOT_INFO_MEM_MAP)) {
		multiboot_memory_map_t* mmap = (multiboot_memory_map_t*) mb_info->mmap_addr;
		multiboot_memory_map_t* mmap_end = (void*) ((size_t) mb_info->mmap_addr + mb_info->mmap_length);

		while (mmap < mmap_end) {
			if (mmap->type != MULTIBOOT_MEMORY_AVAILABLE) {
				npages = mmap->len / PAGE_SIZE;
				if ((mmap->addr+mmap->len) % PAGE_SIZE)
					npages++; // round up for partial pages
				map_region(mmap->addr, mmap->addr, npages, MAP_NO_CACHE | MAP_REMAP);
			}
			mmap++;
		}
	}
#endif

	/*
	 * Modules like the init ram disk are already loaded.
	 * Therefore, we map these modules into the kernel space.
	 */
	if (mb_info && (mb_info->flags & MULTIBOOT_INFO_MODS)) {
		multiboot_module_t* mmodule = (multiboot_module_t*) ((size_t) mb_info->mods_addr);
		npages = PAGE_FLOOR(mb_info->mods_count*sizeof(multiboot_module_t)) >> PAGE_BITS;

		map_region((size_t) mmodule, (size_t) mmodule, npages, MAP_REMAP);

		for(i=0; i<mb_info->mods_count; i++, mmodule++) {
			// map physical address to the same virtual address
			npages = PAGE_FLOOR(mmodule->mod_end - mmodule->mod_start) >> PAGE_BITS;
			kprintf("Map module %s at %#x (%u pages)\n", (char*)(size_t) mmodule->cmdline, mmodule->mod_start, npages);
			map_region((size_t) (mmodule->mod_start), (size_t) (mmodule->mod_start), npages, MAP_REMAP);
		}
	}
#endif

	// we turned on paging => now, we are able to register our task
	register_task();

	// APIC registers into the kernel address space
	map_apic();

	return 0;
}
|
2014-02-18 13:08:22 +01:00
|
|
|
|