added first version of paging code

(only page_map() works for now)
Steffen Vogel 2014-08-21 20:17:02 +02:00
parent 9b4714113a
commit fbf55178c1
6 changed files with 270 additions and 36 deletions


@@ -50,7 +50,7 @@
/// Page map bits
#define PAGE_MAP_BITS 10
/// Number of page map indirections
#define PAGE_MAP_LEVELS 2
#define PAGE_LEVELS 2
/// Mask the page address without page map flags
#define PAGE_MASK 0xFFFFF000
@@ -93,11 +93,14 @@
#define PG_BOOT (1 << 9)
/** @brief A single entry in a page map
/** @brief Converts a virtual address to a physical address
*
* Usually used as a pointer to a mapped page map entry.
* A non-mapped virtual address causes a page fault!
*
* @param addr Virtual address to convert
* @return Physical address
*/
typedef size_t page_entry_t;
size_t page_virt_to_phys(size_t vir);
/** @brief Initialize paging subsystem
*
@@ -112,27 +115,19 @@ int page_init();
*
* @param viraddr
* @param phyaddr
* @param pages
* @param flags
* @param npages
* @param bits
* @return
*/
int page_map(size_t viraddr, size_t phyaddr, size_t pages, size_t flags);
int page_map(size_t viraddr, size_t phyaddr, size_t npages, size_t bits);
/** @brief Unmap a contiguous region of pages
*
* @param viraddr
* @param pages
* @param npages
* @return
*/
int page_unmap(size_t viraddr, size_t pages);
/** @brief Copy a single page frame
*
* @param dest
* @param src
* @return
*/
int page_copy(size_t dest, size_t src);
int page_unmap(size_t viraddr, size_t npages);
/** @brief Copy a whole page map tree
*
@@ -140,19 +135,12 @@ int page_copy(size_t dest, size_t src);
* @param src
* @return
*/
int page_map_copy(page_entry_t *dest, page_entry_t *src);
int page_map_copy(size_t *dest, size_t *src);
/** @brief Free a whole page map tree
*
* @param map
*/
int page_map_drop(page_entry_t *map);
/** @brief Converts a virtual address to a physical
*
* @param viraddr Virtual address to convert
* @return Physical address
*/
size_t virt_to_phys(size_t viraddr);
int page_map_drop(size_t *map);
#endif
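
To make these constants concrete, here is a small standalone sketch (editorial illustration, not part of this commit) of how a 32-bit virtual address splits under the two-level scheme; PAGE_BITS is assumed to be 12 (4 KiB pages), consistent with PAGE_MASK = 0xFFFFF000 above:

#include <stdio.h>
#include <stdint.h>

#define PAGE_BITS     12          /* assumed: 4 KiB pages */
#define PAGE_MAP_BITS 10          /* from page.h above */
#define PAGE_MASK     0xFFFFF000  /* from page.h above */

int main(void)
{
	uint32_t addr = 0xC0123ABC;  /* arbitrary example address */
	uint32_t off  = addr & ~PAGE_MASK;                                /* offset within page */
	uint32_t pgt  = (addr >> PAGE_BITS) & ((1 << PAGE_MAP_BITS) - 1); /* PGT index */
	uint32_t pgd  = addr >> (PAGE_BITS + PAGE_MAP_BITS);              /* PGD index */

	/* prints: PGD 768, PGT 291, offset 0xabc */
	printf("PGD %u, PGT %u, offset %#x\n", (unsigned) pgd, (unsigned) pgt, (unsigned) off);
	return 0;
}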


@@ -1,4 +1,4 @@
;
; Copyright (c) 2010, Stefan Lankes, RWTH Aachen University
; All rights reserved.
;
@@ -318,22 +318,25 @@ ALIGN 4
mb_info:
DD 0
global boot_stack
ALIGN 4096
global boot_stack
boot_stack:
TIMES (KERNEL_STACK_SIZE) DB 0xcd
; Bootstrap page tables are used during the initialization.
; These tables do a simple identity paging and will
; be replaced in page_init() by more fine-granular mappings.
global boot_map
ALIGN 4096
global boot_map
boot_map:
boot_pgd:
DD boot_pgt + 0x103 ; PG_GLOBAL | PG_RW | PG_PRESENT
times 1023 DD 0 ; PAGE_MAP_ENTRIES - 1
boot_pgt:
%assign i 0
%rep 1024
DD (i << 12) | 0x083
%assign i i+1
%rep 1024 ; PAGE_MAP_ENTRIES
DD i | 0x203 ; PG_BOOT | PG_RW | PG_PRESENT
%assign i i + 4096 ; PAGE_SIZE
%endrep
; add some hints to the ELF file
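
The rewritten loop advances i by PAGE_SIZE directly instead of shifting a page index, and swaps PG_GLOBAL for PG_BOOT. In C terms it generates the following (sketch only; 0x203 = PG_BOOT | PG_RW | PG_PRESENT, as the comments in the diff note):

#include <stddef.h>

/* C equivalent of the boot_pgt initialization above (illustrative only):
 * entry n identity-maps the physical frame at n * 4096. */
static size_t boot_pgt_c[1024];             /* PAGE_MAP_ENTRIES */

static void fill_boot_pgt(void)
{
	int n;
	for (n = 0; n < 1024; n++)
		boot_pgt_c[n] = ((size_t) n * 4096) | 0x203;
}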

arch/x86/mm/Makefile (new file, +4)

@@ -0,0 +1,4 @@
C_source := page.c
MODULE := arch_x86_mm
include $(TOPDIR)/Makefile.inc

arch/x86/mm/page.c (new file, +238)

@@ -0,0 +1,238 @@
/*
* Copyright (c) 2010, Stefan Lankes, RWTH Aachen University
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <eduos/stdio.h>
#include <eduos/memory.h>
#include <eduos/errno.h>
#include <eduos/string.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/io.h>
/* Note that linker symbols are not variables; they have no memory
 * allocated for maintaining a value. Rather, their address is their value. */
extern const void kernel_start;
extern const void kernel_end;
/** This PGD table is initialized in entry.asm */
extern size_t boot_map[PAGE_MAP_ENTRIES];
/** A self-reference enables direct access to all page tables */
static size_t* self[PAGE_LEVELS] = {
(size_t *) PAGE_MAP_PGT, (size_t *) PAGE_MAP_PGD
};
#define self_child(lvl, vpn) &self[lvl-1][vpn<<PAGE_MAP_BITS]
#define self_parent(lvl, vpn) &self[lvl+1][vpn>>PAGE_MAP_BITS]
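/* Editorial sketch, not part of this commit: the self-reference is installed
 * in page_init() below by pointing the last PGD slot back at the PGD.
 * PAGE_MAP_PGT and PAGE_MAP_PGD are not defined in this diff; assuming the
 * conventional values implied by using slot 1023, PAGE_MAP_PGT would be
 * 0xFFC00000 (all PTEs as one linear array) and PAGE_MAP_PGD 0xFFFFF000
 * (the PGD itself). Finding the PTE of an address is then pure arithmetic;
 * pte_of() is a hypothetical helper for illustration: */
static inline size_t *pte_of(size_t addr)
{
	return &self[0][addr >> PAGE_BITS]; /* == &((size_t *) PAGE_MAP_PGT)[vpn] */
}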
/** @todo Doesn't handle huge pages for now
 * @todo This will cause a page fault if addr isn't mapped! */
size_t page_virt_to_phys(size_t addr)
{
size_t vpn = addr >> PAGE_BITS; // virtual page number
size_t entry = self[0][vpn]; // page table entry
size_t off = addr & ~PAGE_MASK; // offset within page
size_t phy = entry & PAGE_MASK; // physical page frame number
return phy | off;
}
int page_map(size_t viraddr, size_t phyaddr, size_t npages, size_t bits)
{
int lvl;
long vpn = viraddr >> PAGE_BITS;
long first[PAGE_LEVELS], last[PAGE_LEVELS]; // index boundaries for self-mapping
for (lvl=0; lvl<PAGE_LEVELS; lvl++) {
first[lvl] = (vpn             ) >> (lvl * PAGE_MAP_BITS);
last[lvl]  = (vpn + npages - 1) >> (lvl * PAGE_MAP_BITS);
}
/* We now iterate through the entries,
 * starting at the root table (PGD) */
for (lvl=PAGE_LEVELS-1; lvl>=0; lvl--) {
for (vpn=first[lvl]; vpn<=last[lvl]; vpn++) {
if (lvl) { /* PML4, PDPT, PGD */
if (self[lvl][vpn] & PG_PRESENT) {
/* There is already an existing table which only allows
 * kernel accesses. We need to copy the table to create a
 * private copy for the user-space process */
if (!(self[lvl][vpn] & PG_USER) && (bits & PG_USER)) {
size_t phyaddr = get_pages(1);
if (BUILTIN_EXPECT(!phyaddr, 0))
return -ENOMEM;
/* Copy old table contents to new one.
* We temporarily use page zero for this
* by mapping the new table to this address. */
page_map(0, phyaddr, 1, PG_RW | PG_PRESENT);
memcpy(0, self_child(lvl, vpn), PAGE_SIZE);
/* Update table by replacing address and altering flags */
self[lvl][vpn] &= ~(PAGE_MASK | PG_GLOBAL);
self[lvl][vpn] |= phyaddr | PG_USER;
/* We only need to flush the self-mapped table.
* TLB entries mapped by this table remain valid
* because we only made an identical copy. */
tlb_flush_one_page((size_t) self_child(lvl, vpn));
}
}
else {
/* There's no table available which covers the region.
* Therefore we need to create a new empty table. */
size_t phyaddr = get_pages(1);
if (BUILTIN_EXPECT(!phyaddr, 0))
return -ENOMEM;
/* Reference the new table in the parent */
self[lvl][vpn] = phyaddr | bits;
/* Fill new table with zeros */
memset(self_child(lvl, vpn), 0, PAGE_SIZE);
}
}
else { /* PGT */
if (self[lvl][vpn] & PG_PRESENT)
tlb_flush_one_page(vpn << PAGE_BITS);
self[lvl][vpn] = phyaddr | bits;
phyaddr += PAGE_SIZE;
}
}
}
return 0;
}
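/* Hypothetical usage sketch (illustrative, not in this commit): map four
 * freshly allocated frames kernel read-write at a made-up virtual address.
 * This assumes get_pages() takes a page count, as its use above suggests. */
static int example_map(void)
{
	size_t phyaddr = get_pages(4);
	if (BUILTIN_EXPECT(!phyaddr, 0))
		return -ENOMEM;
	return page_map(0xD0000000, phyaddr, 4, PG_PRESENT | PG_RW);
}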
int page_unmap(size_t viraddr, size_t npages)
{
int lvl;
long vpn = viraddr >> PAGE_BITS;
long first[PAGE_LEVELS], last[PAGE_LEVELS]; // index boundaries for self-mapping
for (lvl=0; lvl<PAGE_LEVELS; lvl++) {
first[lvl] = (vpn             ) >> (lvl * PAGE_MAP_BITS);
last[lvl]  = (vpn + npages - 1) >> (lvl * PAGE_MAP_BITS);
}
/* We now iterate through the entries,
 * starting at the root table (PGD) */
for (lvl=PAGE_LEVELS-1; lvl>=0; lvl--) {
for (vpn=first[lvl]; vpn<=last[lvl]; vpn++) {
if (lvl) { /* PML4, PDPT, PGD */
}
else { /* PGT */
}
}
}
return 0;
}
int page_map_drop(size_t *map)
{
int lvl;
long vpn;
/* We now iterate through the entries,
 * starting at the root table (PGD) */
for (lvl=PAGE_LEVELS-1; lvl>=0; lvl--) {
for (vpn=0; vpn<PAGE_MAP_ENTRIES; vpn++) {
if (lvl) { /* PML4, PDPT, PGD */
}
else { /* PGT */
}
}
}
return 0;
}
int page_map_copy(size_t *dest, size_t *src)
{
int lvl;
long vpn;
/* We now iterate through the entries,
 * starting at the root table (PGD) */
for (lvl=PAGE_LEVELS-1; lvl>=0; lvl--) {
for (vpn=0; vpn<PAGE_MAP_ENTRIES; vpn++) {
if (lvl) { /* PML4, PDPT, PGD */
}
else { /* PGT */
}
}
}
return 0;
}
void page_fault_handler(struct state *s)
{
size_t viraddr = read_cr2();
kprintf("Page Fault Exception (%d) at cs:ip = %#x:%#lx, address = %#lx\n",
s->int_no, s->cs, s->eip, viraddr);
outportb(0x20, 0x20);
while(1) HALT;
}
int page_init()
{
size_t npages;
// replace default pagefault handler
irq_uninstall_handler(14);
irq_install_handler(14, page_fault_handler);
// create self-reference
boot_map[PAGE_MAP_ENTRIES-1] = (size_t) &boot_map | PG_PRESENT | PG_RW;
// map kernel
npages = PAGE_FLOOR((size_t) &kernel_end - (size_t) &kernel_start) >> PAGE_BITS;
page_map((size_t) &kernel_start, (size_t) &kernel_start, npages, PG_PRESENT | PG_RW | PG_GLOBAL);
#ifdef CONFIG_VGA
// map video memory
page_map(VIDEO_MEM_ADDR, VIDEO_MEM_ADDR, 1, PG_PCD | PG_PRESENT | PG_RW);
#endif
// flush TLB to apply new mappings
flush_tlb();
return 0;
}


@@ -100,12 +100,12 @@ static int eduos_init(void)
// initialize .bss section
memset((void*)&bss_start, 0x00, ((size_t) &bss_end - (size_t) &bss_start));
memory_init();
system_init();
irq_init();
timer_init();
koutput_init();
multitasking_init();
memory_init();
return 0;
}


@@ -230,10 +230,11 @@ int memory_init(void)
}
// enable paging and map SMP, VGA, Multiboot modules etc.
/*ret = page_init();
if (BUILTIN_EXPECT(ret, 0))
ret = page_init();
if (BUILTIN_EXPECT(ret, 0)) {
kputs("Failed to initialize paging!\n");
return ret;
*/
}
return ret;
}