add init code, which jumps to 64-bit mode and calls main
parent d21e707024
commit ed2186ee03
9 changed files with 1907 additions and 5 deletions
Makefile
@@ -46,6 +46,7 @@ ARFLAGS = rsv
 LDFLAGS = -T link$(BIT).ld -z max-page-size=4096 --defsym __BUILD_DATE=$(shell date +'%Y%m%d') --defsym __BUILD_TIME=$(shell date +'%H%M%S')
 STRIP_DEBUG = --strip-debug
 KEEP_DEBUG = --only-keep-debug
+OUTPUT_FORMAT = -O elf32-i386
 CFLAGS_FOR_NEWLIB = -m32 -march=i586 -O2 $(STACKPROT)
 LDFLAGS_FOR_NEWLIB = -m32 -march=i586
 NASMFLAGS_FOR_NEWLIB = -felf32

@@ -83,7 +84,7 @@ $(NAME).elf:
 	@echo [OBJCOPY] $(NAME).sym
 	$Q$(OBJCOPY_FOR_TARGET) $(KEEP_DEBUG) $(NAME).elf $(NAME).sym
 	@echo [OBJCOPY] $(NAME).elf
-	$Q$(OBJCOPY_FOR_TARGET) $(STRIP_DEBUG) $(NAME).elf
+	$Q$(OBJCOPY_FOR_TARGET) $(STRIP_DEBUG) $(OUTPUT_FORMAT) $(NAME).elf

 qemu: newlib tools $(NAME).elf
 	$(QEMU) -monitor stdio -smp 2 -net nic,model=rtl8139 -net user,hostfwd=tcp::12345-:4711 -net dump -kernel metalsvm.elf -initrd tools/initrd.img
arch/x86/kernel/Makefile
@@ -1,5 +1,5 @@
 C_source := gdt.c kb.c timer.c irq.c isrs.c idt.c vga.c multiboot.c apic.c pci.c processor.c
-ASM_source := entry.asm string$(BIT).asm
+ASM_source := entry$(BIT).asm string$(BIT).asm
 MODULE := arch_x86_kernel

 include $(TOPDIR)/Makefile.inc
arch/x86/kernel/entry64.asm (new file, 1025 lines)
File diff suppressed because it is too large
arch/x86/kernel/gdt.c
@@ -29,7 +29,7 @@
 gdt_ptr_t gp;
 tss_t task_state_segments[MAX_CORES] __attribute__ ((aligned (PAGE_SIZE)));
 static unsigned char kstacks[MAX_TASKS][KERNEL_STACK_SIZE] __attribute__ ((aligned (PAGE_SIZE))) = {[0 ... MAX_TASKS-1][0 ... KERNEL_STACK_SIZE-1] = 0xCD};
 size_t default_stack_pointer = (size_t) kstacks[0] + KERNEL_STACK_SIZE - sizeof(size_t);
 // currently, our kernel has full access to the ioports
 static gdt_entry_t gdt[GDT_ENTRIES] = {[0 ... GDT_ENTRIES-1] = {0, 0, 0, 0, 0, 0}};
arch/x86/mm/Makefile
@@ -1,4 +1,4 @@
-C_source := page.c svm.c
+C_source := page$(BIT).c svm.c
 MODULE := arch_x86_mm

 include $(TOPDIR)/Makefile.inc
arch/x86/mm/page64.c (new file, 871 lines)
@@ -0,0 +1,871 @@
/*
 * Copyright 2012 Stefan Lankes, Chair for Operating Systems,
 *                RWTH Aachen University
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * This file is part of MetalSVM.
 */

#include <metalsvm/stddef.h>
#include <metalsvm/stdio.h>
#include <metalsvm/stdlib.h>
#include <metalsvm/mmu.h>
#include <metalsvm/vma.h>
#include <metalsvm/string.h>
#include <metalsvm/page.h>
#include <metalsvm/spinlock.h>
#include <metalsvm/processor.h>
#include <metalsvm/tasks.h>
#include <metalsvm/errno.h>
#include <asm/irq.h>
#include <asm/multiboot.h>
#include <asm/apic.h>
#ifdef CONFIG_ROCKCREEK
#include <asm/RCCE_lib.h>
#include <asm/SCC_API.h>
#include <asm/svm.h>
#include <asm/icc.h>
#endif

/*
 * Virtual Memory Layout of the standard configuration
 * (1 GB kernel space)
 *
 * 0x00000000 - 0x000FFFFF: reserved for IO devices (1MB)
 * 0x00100000 - 0x0DEADFFF: Kernel (size depends on the configuration) (221MB)
 * 0x0DEAE000 - 0x3FFFEFFF: Kernel heap (801MB)
 * 0x3FFF F000 - 0x3FFFFFFF: Page Tables are mapped in this region (4KB)
 *                           (The last 256 entries belong to kernel space)
 */
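
/*
 * Illustrative helpers (added by the editor, not part of the original
 * commit): the disabled code below still uses the 32-bit two-level scheme,
 * so a virtual address splits into a page-directory index (bits 31..22),
 * a page-table index (bits 21..12) and a page offset (bits 11..0).
 */
static inline uint32_t pgd_idx(size_t viraddr) { return viraddr >> 22; }
static inline uint32_t pgt_idx(size_t viraddr) { return (viraddr >> 12) & 0x3FF; }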

/*
 * Note that linker symbols are not variables: they have no memory allocated
 * for maintaining a value; rather, their address is their value.
 */
extern const void kernel_start;
extern const void kernel_end;
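/*
 * Example (illustrative only): because a linker symbol's address is its
 * value, the size of the kernel image follows from plain address
 * arithmetic, exactly as used in arch_paging_init() below:
 *
 *   size_t image_size = (size_t) &kernel_end - (size_t) &kernel_start;
 */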

// boot task's page directory and page directory lock
static page_dir_t boot_pgd = {{[0 ... PGT_ENTRIES-1] = 0}};
static spinlock_t kslock = SPINLOCK_INIT;
//static int paging_enabled = 0;

page_dir_t* get_boot_pgd(void)
{
    return &boot_pgd;
}

#if 0
/*
 * TODO: We create a full copy of the current task. Copy-On-Access will be the better solution.
 *
 * No PGD locking is needed because only create_pgd() uses this function and
 * it already holds the PGD lock.
 */
inline static size_t copy_page_table(task_t* task, uint32_t pgd_index, page_table_t* pgt, int* counter)
{
    uint32_t i;
    page_table_t* new_pgt;
    size_t phyaddr;

#ifdef CONFIG_X86_32
    if (BUILTIN_EXPECT(!pgt, 0))
        return 0;

    new_pgt = kmalloc(sizeof(page_table_t));
    if (!new_pgt)
        return 0;
    memset(new_pgt, 0x00, sizeof(page_table_t));
    if (counter)
        (*counter)++;

    for(i=0; i<1024; i++) {
        if (pgt->entries[i] & PAGE_MASK) {
            if (!(pgt->entries[i] & PG_USER)) {
                // Kernel page => copy only page entries
                new_pgt->entries[i] = pgt->entries[i];
                continue;
            }

            phyaddr = get_page();
            if (!phyaddr)
                continue;
            if (counter)
                (*counter)++;

            copy_page_physical((void*)phyaddr, (void*) (pgt->entries[i] & PAGE_MASK));

            new_pgt->entries[i] = phyaddr | (pgt->entries[i] & 0xFFF);

            atomic_int32_inc(&task->user_usage);
        }
    }

    phyaddr = virt_to_phys((size_t)new_pgt);

    return phyaddr;
#else
#warning Currently not supported
    return 0;
#endif
}
#endif

int create_pgd(task_t* task, int copy)
{
#if 0
    page_dir_t* pgd;
    page_table_t* pgt;
    page_table_t* pgt_container;
    uint32_t i;
    uint32_t index1, index2;
    size_t viraddr, phyaddr;
    int counter = 0;
    task_t* curr_task = per_core(current_task);

    if (BUILTIN_EXPECT(!paging_enabled, 0))
        return -EINVAL;

    // we already know the virtual address of the "page table container"
    // (see file header)
    pgt_container = (page_table_t*) ((KERNEL_SPACE - PAGE_SIZE) & PAGE_MASK);

    // create new page directory for the new task
    pgd = kmalloc(sizeof(page_dir_t));
    if (!pgd)
        return -ENOMEM;
    memset(pgd, 0x00, sizeof(page_dir_t));

    // create a new "page table container" for the new task
    pgt = kmalloc(sizeof(page_table_t));
    if (!pgt) {
        kfree(pgd, sizeof(page_dir_t));
        return -ENOMEM;
    }
    memset(pgt, 0x00, sizeof(page_table_t));

    spinlock_lock(&kslock);

    for(i=0; i<1024; i++) {
        pgd->entries[i] = boot_pgd.entries[i];
        // only kernel entries will be copied
        if (pgd->entries[i] && !(pgd->entries[i] & PG_USER))
            pgt->entries[i] = pgt_container->entries[i];
    }

    spinlock_unlock(&kslock);

    // map page table container at the end of the kernel space
    viraddr = (KERNEL_SPACE - PAGE_SIZE) & PAGE_MASK;
    index1 = viraddr >> 22;
    index2 = (viraddr >> 12) & 0x3FF;

    // now, we create a self reference
    pgd->entries[index1] = ((size_t) virt_to_phys((size_t) pgt) & PAGE_MASK)|KERN_TABLE;
    pgt->entries[index2] = ((size_t) virt_to_phys((size_t) pgt) & PAGE_MASK)|KERN_PAGE;

    task->pgd = pgd;

    if (copy) {
        spinlock_lock(&curr_task->pgd_lock);

        for (i=KERNEL_SPACE/(1024*PAGE_SIZE); i<1024; i++) {
            if (!(curr_task->pgd->entries[i]))
                continue;
            if (!(curr_task->pgd->entries[i] & PG_USER))
                continue;

            phyaddr = copy_page_table(task, i, (page_table_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + i*PAGE_SIZE) & PAGE_MASK), &counter);
            if (phyaddr) {
                pgd->entries[i] = (phyaddr & PAGE_MASK) | (curr_task->pgd->entries[i] & 0xFFF);
                pgt->entries[i] = (phyaddr & PAGE_MASK) | KERN_PAGE;
            }
        }

        spinlock_unlock(&curr_task->pgd_lock);
    }

    return counter;
#endif

    return 0;
}

/*
 * drops all page frames and the PGD of a user task
 */
int drop_pgd(void)
{
#if 0
    page_dir_t* pgd = per_core(current_task)->pgd;
    size_t phy_pgd = virt_to_phys((size_t) pgd);
    task_t* task = per_core(current_task);
    uint32_t i;

    if (BUILTIN_EXPECT(pgd == &boot_pgd, 0))
        return -EINVAL;

    spinlock_lock(&task->pgd_lock);

    for(i=0; i<1024; i++) {
        if (pgd->entries[i] & PG_USER) {
            put_page(pgd->entries[i] & PAGE_MASK);
            pgd->entries[i] = 0;
        }
    }

    // freeing the page directory
    put_page(phy_pgd);

    task->pgd = NULL;

    spinlock_unlock(&task->pgd_lock);
#endif

    return 0;
}

size_t virt_to_phys(size_t viraddr)
{
#if 0
    task_t* task = per_core(current_task);
    uint32_t index1, index2;
    page_table_t* pgt;
    size_t ret = 0;

    if (!paging_enabled)
        return viraddr;

    if (BUILTIN_EXPECT(!task || !task->pgd, 0))
        return 0;

    spinlock_lock(&task->pgd_lock);

    index1 = viraddr >> 22;
    index2 = (viraddr >> 12) & 0x3FF;

    if (!(task->pgd->entries[index1] & PAGE_MASK))
        goto out;

    pgt = (page_table_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE) & PAGE_MASK);
    if (!pgt || !(pgt->entries[index2]))
        goto out;

    ret = pgt->entries[index2] & PAGE_MASK; // determine page frame
    ret = ret | (viraddr & 0xFFF);          // add page offset
out:
    //kprintf("vir %p to phy %p\n", viraddr, ret);

    spinlock_unlock(&task->pgd_lock);

    return ret;
#endif
    return 0;
}
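
/*
 * Worked example (editor's note, not part of the original commit): for
 * viraddr = 0x0DEAE123 the lookup above computes
 * index1 = 0x0DEAE123 >> 22 = 0x37 and
 * index2 = (0x0DEAE123 >> 12) & 0x3FF = 0x2AE; the result is the page
 * frame stored in that page-table entry plus the page offset 0x123.
 */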

size_t map_region(size_t viraddr, size_t phyaddr, uint32_t npages, uint32_t flags)
{
#if 0
    task_t* task = per_core(current_task);
    spinlock_t* pgd_lock;
    page_table_t* pgt;
    size_t index, i;
    size_t ret;

    if (BUILTIN_EXPECT(!task || !task->pgd, 0))
        return 0;

    if (BUILTIN_EXPECT(!paging_enabled && (viraddr != phyaddr), 0))
        return 0;

    if (flags & MAP_KERNEL_SPACE)
        pgd_lock = &kslock;
    else
        pgd_lock = &task->pgd_lock;

    spinlock_lock(pgd_lock);

    if (!viraddr) {
        viraddr = vm_alloc(npages, flags);
        if (BUILTIN_EXPECT(!viraddr, 0)) {
            spinlock_unlock(pgd_lock);
            kputs("map_address: found no valid virtual address\n");
            return 0;
        }
    }

    ret = viraddr;
    //kprintf("map %d pages from %p to %p\n", npages, phyaddr, ret);
    for(i=0; i<npages; i++, viraddr+=PAGE_SIZE, phyaddr+=PAGE_SIZE) {
        index = viraddr >> 22;

        if (!(task->pgd->entries[index])) {
            page_table_t* pgt_container;

            pgt = (page_table_t*) get_pages(1);
            if (BUILTIN_EXPECT(!pgt, 0)) {
                spinlock_unlock(pgd_lock);
                kputs("map_address: out of memory\n");
                return 0;
            }

            // set the new page table into the directory
            if (flags & MAP_USER_SPACE)
                task->pgd->entries[index] = (size_t)pgt|USER_TABLE;
            else
                task->pgd->entries[index] = (size_t)pgt|KERN_TABLE;

            // if paging is already enabled, we need to use the virtual address
            if (paging_enabled)
                // we already know the virtual address of the "page table container"
                // (see file header)
                pgt_container = (page_table_t*) ((KERNEL_SPACE - PAGE_SIZE) & PAGE_MASK);
            else
                pgt_container = (page_table_t*) (task->pgd->entries[(KERNEL_SPACE - PAGE_SIZE) >> 22] & PAGE_MASK);

            if (BUILTIN_EXPECT(!pgt_container, 0)) {
                spinlock_unlock(pgd_lock);
                kputs("map_address: internal error\n");
                return 0;
            }

            // map the new table into the address space of the kernel space
            pgt_container->entries[index] = ((size_t) pgt)|KERN_PAGE;

            // clear the page table
            if (paging_enabled)
                memset((void*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index*PAGE_SIZE) & PAGE_MASK), 0x00, PAGE_SIZE);
            else
                memset(pgt, 0x00, PAGE_SIZE);
        } else pgt = (page_table_t*) (task->pgd->entries[index] & PAGE_MASK);

        /* convert physical address to virtual */
        if (paging_enabled)
            pgt = (page_table_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index*PAGE_SIZE) & PAGE_MASK);

        index = (viraddr >> 12) & 0x3FF;
        if (pgt->entries[index] && !(flags & MAP_REMAP)) {
            spinlock_unlock(pgd_lock);
            kprintf("0x%x is already mapped\n", viraddr);
            return 0;
        }

        if (flags & MAP_USER_SPACE)
            pgt->entries[index] = USER_PAGE|(phyaddr & PAGE_MASK);
        else
            pgt->entries[index] = KERN_PAGE|(phyaddr & PAGE_MASK);

        if (flags & MAP_NO_CACHE)
            pgt->entries[index] |= PG_PCD;
#ifdef CONFIG_ROCKCREEK
        if (flags & MAP_MPE)
            pgt->entries[index] |= PG_MPE;
#endif
        if (flags & MAP_SVM_STRONG)
#ifndef SVM_WB
            pgt->entries[index] |= PG_SVM_STRONG|PG_PWT;
#else
            pgt->entries[index] |= PG_SVM;
#endif
        if (flags & MAP_SVM_LAZYRELEASE)
            pgt->entries[index] |= PG_SVM_LAZYRELEASE|PG_PWT;

        if (flags & MAP_SVM_INIT)
            pgt->entries[index] |= PG_SVM_INIT;

        if (flags & MAP_NO_ACCESS)
            pgt->entries[index] &= ~PG_PRESENT;

        if (flags & MAP_WT)
            pgt->entries[index] |= PG_PWT;

        if (flags & MAP_USER_SPACE)
            atomic_int32_inc(&task->user_usage);

        tlb_flush_one_page(viraddr);
    }

    spinlock_unlock(pgd_lock);

    return ret;
#endif

    return 0;
}
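
/*
 * Usage sketch (illustrative only, mirrors the disabled code above):
 * passing viraddr == 0 lets map_region() pick a free virtual range via
 * vm_alloc() before installing the mapping:
 *
 *   size_t vir = map_region(0, phyaddr, npages, MAP_KERNEL_SPACE);
 *   if (!vir)
 *       kputs("out of virtual memory\n");
 */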

int change_page_permissions(size_t start, size_t end, uint32_t flags)
{
#if 0
    uint32_t index1, index2, newflags;
    size_t viraddr = start & PAGE_MASK;
    size_t phyaddr;
    page_table_t* pgt;
    page_dir_t* pgd;
    task_t* task = per_core(current_task);

    if (BUILTIN_EXPECT(!paging_enabled, 0))
        return -EINVAL;

    pgd = per_core(current_task)->pgd;
    if (BUILTIN_EXPECT(!pgd, 0))
        return -EINVAL;

    spinlock_lock(&task->pgd_lock);

    while (viraddr < end)
    {
        index1 = viraddr >> 22;
        index2 = (viraddr >> 12) & 0x3FF;

        while ((viraddr < end) && (index2 < 1024)) {
            pgt = (page_table_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE) & PAGE_MASK);
            if (pgt && pgt->entries[index2]) {
                phyaddr = pgt->entries[index2] & PAGE_MASK;
                newflags = pgt->entries[index2] & 0xFFF; // get old flags

                if (!(newflags & PG_SVM_INIT)) {
                    if ((newflags & PG_SVM_STRONG) && !(newflags & PG_PRESENT) && (flags & (VMA_READ|VMA_WRITE) && !(flags & VMA_NOACCESS)))
                        newflags |= PG_PRESENT;
                    else if ((newflags & PG_SVM_STRONG) && (newflags & PG_PRESENT) && (flags & VMA_NOACCESS))
                        newflags &= ~PG_PRESENT;
                }

                // update flags
                if (!(flags & VMA_WRITE)) {
                    newflags &= ~PG_RW;
#ifdef CONFIG_ROCKCREEK
                    if (newflags & (PG_SVM_STRONG|PG_SVM_LAZYRELEASE))
                        newflags &= ~PG_MPE;
#endif
                } else {
                    newflags |= PG_RW;
#ifdef CONFIG_ROCKCREEK
                    if (newflags & (PG_SVM_STRONG|PG_SVM_LAZYRELEASE))
                        newflags |= PG_MPE;
#endif
                }

                pgt->entries[index2] = (newflags & 0xFFF) | (phyaddr & PAGE_MASK);

                tlb_flush_one_page(viraddr);
            }

            index2++;
            viraddr += PAGE_SIZE;
        }
    }

    spinlock_unlock(&task->pgd_lock);
#endif

    return 0;
}

/*
 * Use the first fit algorithm to find a valid address range
 *
 * TODO: O(n) => bad performance, we need a better approach
 */
size_t vm_alloc(uint32_t npages, uint32_t flags)
{
#if 0
    task_t* task = per_core(current_task);
    spinlock_t* pgd_lock;
    uint32_t index1, index2, j;
    size_t viraddr, i, ret = 0;
    size_t start, end;
    page_table_t* pgt;

    if (BUILTIN_EXPECT(!task || !task->pgd || !paging_enabled, 0))
        return 0;

    if (flags & MAP_KERNEL_SPACE) {
        pgd_lock = &kslock;
        start = (((size_t) &kernel_end) + PAGE_SIZE) & PAGE_MASK;
        end = (KERNEL_SPACE - 2*PAGE_SIZE) & PAGE_MASK; // we need 1 PAGE for our PGTs
    } else {
        pgd_lock = &task->pgd_lock;
        start = KERNEL_SPACE & PAGE_MASK;
        end = PAGE_MASK;
    }

    if (BUILTIN_EXPECT(!npages, 0))
        return 0;

    spinlock_lock(pgd_lock);

    viraddr = i = start;
    j = 0;
    do {
        index1 = i >> 22;
        index2 = (i >> 12) & 0x3FF;

        pgt = (page_table_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE) & PAGE_MASK);
        if (!pgt || !(pgt->entries[index2])) {
            i += PAGE_SIZE;
            j++;
        } else {
            // restart search
            j = 0;
            viraddr = i + PAGE_SIZE;
            i = i + PAGE_SIZE;
        }
    } while((j < npages) && (i <= end));

    if ((j >= npages) && (viraddr < end))
        ret = viraddr;

    spinlock_unlock(pgd_lock);

    return ret;
#endif
    return 0;
}

int unmap_region(size_t viraddr, uint32_t npages)
{
#if 0
    task_t* task = per_core(current_task);
    spinlock_t* pgd_lock;
    uint32_t i;
    uint32_t index1, index2;
    page_table_t* pgt;

    if (BUILTIN_EXPECT(!task || !task->pgd || !paging_enabled, 0))
        return -EINVAL;

    if (viraddr <= KERNEL_SPACE)
        pgd_lock = &kslock;
    else
        pgd_lock = &task->pgd_lock;

    spinlock_lock(pgd_lock);

    for(i=0; i<npages; i++, viraddr+=PAGE_SIZE)
    {
        index1 = viraddr >> 22;
        index2 = (viraddr >> 12) & 0x3FF;

        pgt = (page_table_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE) & PAGE_MASK);
        if (!pgt)
            continue;
        pgt->entries[index2] &= ~PG_PRESENT;

        if (viraddr > KERNEL_SPACE)
            atomic_int32_dec(&task->user_usage);

        tlb_flush_one_page(viraddr);
    }

    spinlock_unlock(pgd_lock);
#endif

    return 0;
}

int vm_free(size_t viraddr, uint32_t npages)
{
#if 0
    task_t* task = per_core(current_task);
    spinlock_t* pgd_lock;
    uint32_t i;
    uint32_t index1, index2;
    page_table_t* pgt;

    if (BUILTIN_EXPECT(!task || !task->pgd || !paging_enabled, 0))
        return -EINVAL;

    if (viraddr <= KERNEL_SPACE)
        pgd_lock = &kslock;
    else
        pgd_lock = &task->pgd_lock;

    spinlock_lock(pgd_lock);

    for(i=0; i<npages; i++, viraddr+=PAGE_SIZE)
    {
        index1 = viraddr >> 22;
        index2 = (viraddr >> 12) & 0x3FF;

        pgt = (page_table_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE) & PAGE_MASK);
        if (!pgt)
            continue;
        pgt->entries[index2] = 0;

        tlb_flush_one_page(viraddr);
    }

    spinlock_unlock(pgd_lock);
#endif

    return 0;
}

#if 0
int print_paging_tree(size_t viraddr)
{
    task_t* task = per_core(current_task);
    uint32_t index1, index2;
    page_dir_t* pgd = NULL;
    page_table_t* pgt = NULL;

    if (BUILTIN_EXPECT(!viraddr, 0))
        return -EINVAL;

    index1 = viraddr >> 22;
    index2 = (viraddr >> 12) & 0x3FF;

    spinlock_lock(&task->pgd_lock);

    kprintf("Paging dump of address 0x%x\n", viraddr);
    pgd = task->pgd;
    kprintf("\tPage directory entry %u: ", index1);
    if (pgd) {
        kprintf("0x%0x\n", pgd->entries[index1]);
        pgt = (page_table_t*) (pgd->entries[index1] & PAGE_MASK);
    } else
        kputs("invalid page directory\n");

    /* convert physical address to virtual */
    if (paging_enabled && pgt)
        pgt = (page_table_t*) (KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE);

    kprintf("\tPage table entry %u: ", index2);
    if (pgt)
        kprintf("0x%x\n", pgt->entries[index2]);
    else
        kputs("invalid page table\n");

    spinlock_unlock(&task->pgd_lock);

    return 0;
}

static void pagefault_handler(struct state *s)
{
    task_t* task = per_core(current_task);
    page_dir_t* pgd = task->pgd;
    page_table_t* pgt = NULL;
    size_t viraddr = read_cr2();
    size_t phyaddr;
#ifdef CONFIG_ROCKCREEK
    uint32_t index1, index2;
#endif
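    /* Demand paging: a fault inside the current task's heap range is
     * resolved by grabbing a fresh page frame, mapping it at the faulting
     * address and zero-filling it. (Descriptive comment added by the
     * editor; not part of the original commit.) */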

    if ((viraddr >= task->start_heap) && (viraddr <= task->end_heap) && (viraddr > KERNEL_SPACE)) {
        viraddr = viraddr & PAGE_MASK;

        phyaddr = get_page();
        if (BUILTIN_EXPECT(!phyaddr, 0))
            goto default_handler;

        if (map_region(viraddr, phyaddr, 1, MAP_USER_SPACE) == viraddr) {
            memset((void*) viraddr, 0x00, PAGE_SIZE);
            return;
        }

        kprintf("Could not map 0x%x at 0x%x\n", phyaddr, viraddr);
        put_page(phyaddr);
    }

#ifdef CONFIG_ROCKCREEK
    // does our SVM system need to handle this page fault?
    index1 = viraddr >> 22;
    index2 = (viraddr >> 12) & 0x3FF;
    if (!pgd || !(pgd->entries[index1] & PAGE_MASK))
        goto default_handler;
    pgt = (page_table_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE) & PAGE_MASK);
    if (!pgt || !(pgt->entries[index2]))
        goto default_handler;
    if (pgt->entries[index2] & PG_SVM_INIT) {
        if (BUILTIN_EXPECT(!svm_alloc_page(viraddr, pgt), 1))
            return;
        else
            goto default_handler;
    }
    if (pgt->entries[index2] & PG_SVM_STRONG)
        if (BUILTIN_EXPECT(!svm_access_request(viraddr), 1))
            return;
#endif

default_handler:
    kprintf("PAGE FAULT: Task %u got page fault at %p (irq %d, cs:eip 0x%x:0x%x)\n", task->id, viraddr, s->int_no, s->cs, s->eip);
    kprintf("Register state: eax = 0x%x, ebx = 0x%x, ecx = 0x%x, edx = 0x%x, edi = 0x%x, esi = 0x%x, ebp = 0x%x, esp = 0x%x\n",
        s->eax, s->ebx, s->ecx, s->edx, s->edi, s->esi, s->ebp, s->esp);

    irq_enable();
    abort();
}
#endif

int arch_paging_init(void)
{
#if 0
    uint32_t i, npages, index1, index2;
    page_table_t* pgt;
    size_t viraddr;

    // uninstall default handler and install our own
    irq_uninstall_handler(14);
    irq_install_handler(14, pagefault_handler);

    // Create a page table to reference the other page tables
    pgt = (page_table_t*) get_page();
    if (!pgt) {
        kputs("arch_paging_init: Not enough memory!\n");
        return -ENOMEM;
    }
    memset(pgt, 0, PAGE_SIZE);

    // map this table at the end of the kernel space
    viraddr = KERNEL_SPACE - PAGE_SIZE;
    index1 = viraddr >> 22;
    index2 = (viraddr >> 12) & 0x3FF;

    // now, we create a self reference
    per_core(current_task)->pgd->entries[index1] = (((size_t) pgt) & PAGE_MASK)|KERN_TABLE;
    pgt->entries[index2] = ((size_t) pgt & PAGE_MASK)|KERN_PAGE;
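    /* Effect of the self reference (explanatory note added by the editor,
     * not part of the original commit): with this entry installed, the
     * i-th kernel page table becomes reachable at the fixed virtual
     * address KERNEL_SPACE - 1024*PAGE_SIZE + i*PAGE_SIZE, which is
     * exactly the address arithmetic used throughout this file. */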

    // create the other PGTs for the kernel space
    for(i=0; i<KERNEL_SPACE/(1024*PAGE_SIZE)-1; i++) {
        size_t phyaddr = get_page();

        if (!phyaddr) {
            kputs("arch_paging_init: Not enough memory!\n");
            return -ENOMEM;
        }

        memset((void*) phyaddr, 0, PAGE_SIZE);
        per_core(current_task)->pgd->entries[i] = (phyaddr & PAGE_MASK)|KERN_TABLE;
        pgt->entries[i] = (phyaddr & PAGE_MASK)|KERN_PAGE;
    }

    /*
     * Set the page table and page directory entries for the kernel.
     * We map the kernel's physical address to the same virtual address.
     */
    npages = ((size_t) &kernel_end - (size_t) &kernel_start) >> PAGE_SHIFT;
    if ((size_t)&kernel_end & (PAGE_SIZE-1))
        npages++;
    map_region((size_t)&kernel_start, (size_t)&kernel_start, npages, MAP_KERNEL_SPACE);

#if MAX_CORES > 1
    // Reserve page for smp boot code
    if (!map_region(SMP_SETUP_ADDR, SMP_SETUP_ADDR, 1, MAP_KERNEL_SPACE|MAP_NO_CACHE)) {
        kputs("could not reserve page for smp boot code\n");
        return -ENOMEM;
    }
#endif

#ifdef CONFIG_VGA
    // map the video memory into the kernel space
    map_region(VIDEO_MEM_ADDR, VIDEO_MEM_ADDR, 1, MAP_KERNEL_SPACE|MAP_NO_CACHE);
#endif

#ifdef CONFIG_MULTIBOOT
    /*
     * of course, mb_info has to be mapped into the kernel space
     */
    if (mb_info)
        map_region((size_t) mb_info & PAGE_MASK, (size_t) mb_info & PAGE_MASK, 1, MAP_KERNEL_SPACE);

#if 0
    /*
     * Map reserved memory regions into the kernel space
     */
    if (mb_info && (mb_info->flags & MULTIBOOT_INFO_MEM_MAP)) {
        multiboot_memory_map_t* mmap = (multiboot_memory_map_t*) mb_info->mmap_addr;
        multiboot_memory_map_t* mmap_end = (void*) ((size_t) mb_info->mmap_addr + mb_info->mmap_length);

        while (mmap < mmap_end) {
            if (mmap->type != MULTIBOOT_MEMORY_AVAILABLE) {
                npages = mmap->len / PAGE_SIZE;
                if ((mmap->addr+mmap->len) % PAGE_SIZE)
                    npages++;
                map_region(mmap->addr, mmap->addr, npages, MAP_KERNEL_SPACE|MAP_NO_CACHE);
            }
            mmap++;
        }
    }
#endif

    /*
     * Modules like the init ram disk are already loaded.
     * Therefore, we map these modules into the kernel space.
     */
    if (mb_info && (mb_info->flags & MULTIBOOT_INFO_MODS)) {
        multiboot_module_t* mmodule = (multiboot_module_t*) ((size_t) mb_info->mods_addr);

        npages = mb_info->mods_count * sizeof(multiboot_module_t) >> PAGE_SHIFT;
        if (mb_info->mods_count * sizeof(multiboot_module_t) & (PAGE_SIZE-1))
            npages++;
        map_region((size_t) (mb_info->mods_addr), (size_t) (mb_info->mods_addr), npages, MAP_KERNEL_SPACE);

        for(i=0; i<mb_info->mods_count; i++, mmodule++) {
            // map physical address to the same virtual address
            npages = (mmodule->mod_end - mmodule->mod_start) >> PAGE_SHIFT;
            if (mmodule->mod_end & (PAGE_SIZE-1))
                npages++;
            map_region((size_t) (mmodule->mod_start), (size_t) (mmodule->mod_start), npages, MAP_KERNEL_SPACE);
        }
    }
#endif

#ifdef CONFIG_ROCKCREEK
    // map SCC's bootinfo
    viraddr = map_region(SCC_BOOTINFO, SCC_BOOTINFO, 1, MAP_KERNEL_SPACE);
    kprintf("Map SCC's bootinfo at 0x%x\n", viraddr);

    // map SCC's configuration registers
    viraddr = map_region(CRB_X0_Y0, CRB_X0_Y0, (CRB_OWN-CRB_X0_Y0+16*1024*1024) >> PAGE_SHIFT, MAP_KERNEL_SPACE|MAP_NO_CACHE);
    kprintf("Map configuration registers at 0x%x\n", viraddr);

    // map SCC's message passing buffers
    viraddr = map_region(MPB_X0_Y0, MPB_X0_Y0, (MPB_OWN-MPB_X0_Y0+16*1024*1024) >> PAGE_SHIFT, MAP_KERNEL_SPACE|MAP_MPE);
    kprintf("Map message passing buffers at 0x%x\n", viraddr);

    // map the FPGA registers
    viraddr = map_region(FPGA_BASE, FPGA_BASE, 0x10000 >> PAGE_SHIFT, MAP_KERNEL_SPACE|MAP_NO_CACHE);
    kprintf("Map FPGA registers at 0x%x\n", viraddr);
#endif

    /* enable paging */
    write_cr3((size_t) &boot_pgd);
    i = read_cr0();
    i = i | (1 << 31);
    write_cr0(i);
    paging_enabled = 1;

#ifdef CONFIG_ROCKCREEK
    // map the initial ramdisk
    npages = bootinfo->size >> PAGE_SHIFT;
    if (bootinfo->size & (PAGE_SIZE-1))
        npages++;
    viraddr = map_region(0, bootinfo->addr, npages, MAP_KERNEL_SPACE);
    kprintf("Map initrd from 0x%x to 0x%x (size %u bytes)\n", bootinfo->addr, viraddr, bootinfo->size);
    bootinfo->addr = viraddr;
#endif

    /*
     * we turned on paging
     * => now, we are able to register our task for Task State Switching
     */
    register_task(per_core(current_task));

    // map APIC registers into the kernel address space
    map_apic();
#endif

    return 0;
}

kernel/main.c
@@ -62,12 +62,17 @@ int main(void)
 {
 	tid_t id;

-	lowlevel_init();
+	vga_init();
+	vga_puts("aaa");
+	//lowlevel_init();

 	pushbg(COL_BLUE);
 	kprintf("This is MetalSVM %s Build %u, %u\n",
 		METALSVM_VERSION, &__BUILD_DATE, &__BUILD_TIME);
 	popbg();

+	while(1);
 	system_init();
 	irq_init();
 	timer_init();