cleanup of macros and comments, indentation etc...

This commit is contained in:
Steffen Vogel 2013-12-03 15:26:21 +01:00
parent edf178f39a
commit 2923b1a7ed
2 changed files with 110 additions and 104 deletions

View file

@ -21,6 +21,7 @@
* @file arch/x86/include/asm/page.h
* @brief Definitions and functions related to paging
* @author Stefan Lankes
* @author Steffen Vogel <steffen.vogel@rwth-aachen.de>
*
* This file defines the interface for paging as well as structures related to paging.
*/
@ -31,80 +32,103 @@
#include <metalsvm/stddef.h>
#include <metalsvm/stdlib.h>
// 4KB pages
/// Page offset bits
#define PAGE_SHIFT 12
#ifdef CONFIG_X86_32
/// Number of page map indirections
#define PAGE_MAP_LEVELS 2
/// Page map bits
#define PAGE_MAP_SHIFT 10
/// Linear/virtual address width
#define VIRT_BITS 32
/// Physical address width (we don't support PAE)
#define PHYS_BITS 32
#elif defined(CONFIG_X86_64)
/// Number of page map indirections
#define PAGE_MAP_LEVELS 4
/// Page map bits
#define PAGE_MAP_SHIFT 9
/// Linear/virtual address width
#define VIRT_BITS 48
/// Physical address width (maximum value)
#define PHYS_BITS 52
#endif
// base addresses of page map structures
/// The size of a single page in bytes
#define PAGE_SIZE ( 1L << PAGE_SHIFT)
/// The number of entries in a page map table
#define PAGE_MAP_ENTRIES ( 1L << PAGE_MAP_SHIFT)
/// Mask the page address
#define PAGE_MASK (-1L << PAGE_SHIFT)
/// Mask the entry in a page table
#define PAGE_ENTRY_MASK (-1L << (PAGE_SHIFT-PAGE_MAP_SHIFT))
/// Sign extension to get a valid canonical address (hack: using arithmetic shifts)
#define VIRT_SEXT(addr) ((ssize_t) addr << (BITS-VIRT_BITS) >> (BITS-VIRT_BITS))
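A minimal host-side sketch (editor's addition, not part of this commit) of what VIRT_SEXT does, assuming BITS is the machine word size (64) and the CONFIG_X86_64 value of VIRT_BITS: bit 47 is shifted up into the sign bit and arithmetically shifted back down, so it is copied into bits 48-63 and the result is a canonical address. As the macro's "hack" comment hints, this relies on GCC performing an arithmetic right shift on signed values.

#include <assert.h>
#include <stdint.h>

#define BITS      64
#define VIRT_BITS 48
#define VIRT_SEXT(addr) ((int64_t) (addr) << (BITS-VIRT_BITS) >> (BITS-VIRT_BITS))

int main(void)
{
    /* bit 47 set: bits 48..63 get filled with ones -> canonical upper-half address */
    assert((uint64_t) VIRT_SEXT(0xFF8000000000UL) == 0xFFFFFF8000000000UL);

    /* bit 47 clear: the address comes back unchanged */
    assert((uint64_t) VIRT_SEXT(0x7F8000000000UL) == 0x00007F8000000000UL);

    return 0;
}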
// base addresses of page map tables
#ifdef CONFIG_X86_32
#define PAGE_PGD 0xFFFFF000
#define PAGE_PGT 0xFFC00000
#define PAGE_MAP_PGD 0xFFFFF000
#define PAGE_MAP_PGT 0xFFC00000
#elif defined(CONFIG_X86_64)
#define PAGE_PML4 0xFFFFFFFFFFFFF000
#define PAGE_PDPT 0xFFFFFFFFFFE00000
#define PAGE_PGD 0xFFFFFFFFC0000000
#define PAGE_PGT 0xFFFFFF8000000000
#define PAGE_MAP_PML4 0xFFFFFFFFFFFFF000
#define PAGE_MAP_PDPT 0xFFFFFFFFFFE00000
#define PAGE_MAP_PGD 0xFFFFFFFFC0000000
#define PAGE_MAP_PGT 0xFFFFFF8000000000
#endif
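Where these constants come from (editor's note, not part of this commit): assuming the last PML4 slot (index 511) is a recursive entry pointing back at the PML4 itself, each base address above is simply that slot taken one to four times and then sign-extended to a canonical address, the same trick VIRT_SEXT implements. A small host-side check under that assumption (relies on GCC's signed-shift behaviour on an LP64 target):

#include <assert.h>
#include <stdint.h>

/* sign-extend a 48-bit address to a canonical 64-bit address (same arithmetic-shift trick as VIRT_SEXT) */
static uint64_t canonical(uint64_t addr)
{
    return (uint64_t) ((int64_t) addr << 16 >> 16);
}

int main(void)
{
    const uint64_t R = 511; /* assumed recursive self-reference slot in the PML4 */

    assert(canonical(R<<39)                         == 0xFFFFFF8000000000UL); /* PAGE_MAP_PGT  */
    assert(canonical(R<<39 | R<<30)                 == 0xFFFFFFFFC0000000UL); /* PAGE_MAP_PGD  */
    assert(canonical(R<<39 | R<<30 | R<<21)         == 0xFFFFFFFFFFE00000UL); /* PAGE_MAP_PDPT */
    assert(canonical(R<<39 | R<<30 | R<<21 | R<<12) == 0xFFFFFFFFFFFFF000UL); /* PAGE_MAP_PML4 */

    return 0;
}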
#define PAGE_MAP_ENTRIES (1 << PAGE_MAP_SHIFT)
#define PAGE_SIZE (1 << PAGE_SHIFT)
#define PAGE_MASK ~(PAGE_SIZE - 1)
#define PAGE_ALIGN(addr) (((addr) + PAGE_SIZE - 1) & PAGE_MASK)
/// Page is present
#define PG_PRESENT (1 << 0)
/// Page is read- and writable
#define PG_RW (1 << 1)
/// Page is addressable from userspace
#define PG_USER (1 << 2)
/// Page write through is activated
#define PG_PWT (1 << 3)
/// Page cache is disabled
#define PG_PCD (1 << 4)
/// Page was recently accessed (set by CPU)
#define PG_ACCESSED (1 << 5)
/// Page is dirty due to recent write-access (set by CPU)
#define PG_DIRTY (1 << 6)
/// Big page: 4MB (or 2MB)
#define PG_PSE (1 << 7)
/// Page is part of the MPB (SCC specific entry)
#define PG_MPE PG_PSE
/// Global TLB entry (Pentium Pro and later)
#define PG_GLOBAL (1 << 8)
/// Pattern flag
#define PG_PAT (1 << 7)
/// This virtual address range is used by the SVM system as marked
#define PG_SVM (1 << 9)
#define PG_SVM_STRONG PG_SVM_STRONG
#define PG_SVM_STRONG PG_SVM
/// This virtual address range is used by the SVM system as marked
#define PG_SVM_LAZYRELEASE (1 << 10)
/// Currently, no page frame is behind this page (only the MPB proxy)
#define PG_SVM_INIT (1 << 11)
/// This is a whole set of flags (PRESENT,RW,ACCESSED,DIRTY) for kernelspace tables
#define KERN_TABLE (PG_PRESENT|PG_RW|PG_ACCESSED|PG_DIRTY)
/// This is a whole set of flags (PRESENT,RW,ACCESSED,DIRTY,USER) for userspace tables
#define USER_TABLE (PG_PRESENT|PG_RW|PG_ACCESSED|PG_DIRTY|PG_USER)
/// This is a whole set of flags (PRESENT,RW,GLOBAL) for kernelspace pages
#define KERN_PAGE (PG_PRESENT|PG_RW|PG_GLOBAL)
/// This is a whole set of flags (PRESENT,RW,USER) for userspace pages
#define USER_PAGE (PG_PRESENT|PG_RW|PG_USER)
/** @brief A single entry in a page map */
typedef size_t page_entry_t;
/** @brief General page map structure
*
* This page map structure is a general type for all indirection levels,
* as all page map levels contain the same number of entries.
* All page maps must be page aligned!
*/
typedef struct page_map {
size_t entries[PAGE_MAP_ENTRIES];
} __attribute__ ((aligned (4096))) page_map_t;
page_entry_t entries[PAGE_MAP_ENTRIES];
} __attribute__ ((aligned (PAGE_SIZE))) page_map_t;
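A quick sanity check of the comment above (editor's sketch, not part of this commit; the SK_* names are stand-ins for the CONFIG_X86_64 values): 512 eight-byte entries fill exactly one 4 KiB page, so together with the aligned attribute every page map is page-sized and page-aligned.

#include <stdint.h>

#define SK_PAGE_SIZE        4096
#define SK_PAGE_MAP_ENTRIES 512

typedef uint64_t sk_page_entry_t;

typedef struct sk_page_map {
    sk_page_entry_t entries[SK_PAGE_MAP_ENTRIES];
} __attribute__ ((aligned (SK_PAGE_SIZE))) sk_page_map_t;

/* 512 * 8 byte = 4096 byte = one page (needs a C11 compiler for _Static_assert) */
_Static_assert(sizeof(sk_page_map_t) == SK_PAGE_SIZE, "a page map must fill exactly one page");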
/** @brief Converts a virtual address to a physical
*
@ -197,7 +221,7 @@ page_map_t* get_boot_page_map(void);
* - counter of allocated page tables
* - -ENOMEM (-12) on failure
*/
int create_page_map(task_t* task, int copy);
int create_page_map(struct task* task, int copy);
/** @brief Delete all page map structures of the current task
*

View file

@ -43,22 +43,54 @@
* 0xFF8000000000 - 0xFFFFFFFFFFFF: Paging structures are mapped in this region (max 512GB)
*/
/*
* Note that linker symbols are not variables, they have no memory allocated for
* maintaining a value, rather their address is their value.
*/
extern const void kernel_start;
extern const void kernel_end;
// boot task's page map and page map lock
/// Boot task's page map
extern page_map_t boot_pml4;
/// Kernel space page map lock
static spinlock_t kslock = SPINLOCK_INIT;
/// Short abbreviations for the page map levels
static const char* page_map_names[] = {"PGT", "PGD", "PDPT", "PML4"};
page_map_t* get_boot_page_map(void)
{
return &boot_pml4;
}
/** @brief Get the corresponding page map entry to a given virtual address */
static page_entry_t* virt_to_entry(size_t addr, int level) {
return (page_entry_t*) ((((ssize_t) addr | (-1L << VIRT_BITS)) >> ((level+1) * PAGE_MAP_SHIFT)) & ~0x7);
}
/** @brief Get the corresponding virtual address to a page map entry */
static size_t entry_to_virt(page_entry_t* entry, int level) {
return VIRT_SEXT((size_t) entry << ((level+1) * PAGE_MAP_SHIFT));
}
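A host-side check of this arithmetic (editor's sketch, not part of this commit), assuming the CONFIG_X86_64 values VIRT_BITS = 48 and PAGE_MAP_SHIFT = 9 on an LP64 target with GCC's arithmetic right shift: for virtual address 0, the entry address computed at each level is exactly the corresponding PAGE_MAP_* base, which is what the recursive mapping guarantees.

#include <assert.h>
#include <stdint.h>

#define VIRT_BITS      48
#define PAGE_MAP_SHIFT 9

/* same computation as virt_to_entry() above, returning the entry address as an integer */
static uint64_t sk_virt_to_entry(int64_t addr, int level)
{
    return (uint64_t) ((addr | (-1L << VIRT_BITS)) >> ((level+1) * PAGE_MAP_SHIFT)) & ~0x7UL;
}

int main(void)
{
    assert(sk_virt_to_entry(0, 0) == 0xFFFFFF8000000000UL); /* PAGE_MAP_PGT  */
    assert(sk_virt_to_entry(0, 1) == 0xFFFFFFFFC0000000UL); /* PAGE_MAP_PGD  */
    assert(sk_virt_to_entry(0, 2) == 0xFFFFFFFFFFE00000UL); /* PAGE_MAP_PDPT */
    assert(sk_virt_to_entry(0, 3) == 0xFFFFFFFFFFFFF000UL); /* PAGE_MAP_PML4 */

    return 0;
}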
size_t virt_to_phys(size_t viraddr) {
task_t* task = per_core(current_task);
spinlock_irqsave_lock(&task->page_lock);
size_t* entry = (size_t*) (PAGE_MAP_PGT | (viraddr >> 9));
size_t phyaddr = (*entry & PAGE_MASK) | (viraddr & ~PAGE_MASK);
spinlock_irqsave_unlock(&task->page_lock);
return phyaddr;
}
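Why viraddr >> 9 hits the right entry (editor's note, not part of this commit): on x86_64 each table entry is 8 bytes, so the entry index viraddr >> 12 multiplied by 8 equals viraddr >> 9 for page-aligned addresses, and ORing that offset onto PAGE_MAP_PGT yields the PTE's address inside the recursively mapped page tables. A tiny check, assuming a lower-half, page-aligned address:

#include <assert.h>
#include <stdint.h>

int main(void)
{
    const uint64_t PAGE_MAP_PGT = 0xFFFFFF8000000000UL;
    uint64_t viraddr = 0x00007F1234567000UL; /* arbitrary page-aligned, lower-half address */

    uint64_t by_or    = PAGE_MAP_PGT | (viraddr >> 9);
    uint64_t by_index = PAGE_MAP_PGT + (viraddr >> 12) * sizeof(uint64_t);

    assert(by_or == by_index);
    return 0;
}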
/** @brief Get level of a given page map entry
*
* @param entry A pointer to a page map entry
* @return The page map level or -1 on failure
*/
static int entry_to_level(page_entry_t* entry) {
int level = -1;
while (entry >= virt_to_entry(0, level+1))
level++;
return level;
}
/** @brief Copy a single page frame
*
* @param src virtual address of source page frame
@ -97,39 +129,6 @@ static size_t copy_page_frame(size_t *src)
#endif
}
static inline size_t canonicalize(size_t addr)
{
if (addr & (1UL<<47))
return addr;
else
return addr & ((1UL<<48) - 1);
}
static inline int map_to_level(size_t addr)
{
if (addr >= PAGE_PML4)
return 4;
else if (addr >= PAGE_PDPT)
return 3;
else if (addr >= PAGE_PGD)
return 2;
else if (addr >= PAGE_PGT)
return 1;
else
return -EINVAL;
}
static inline const char * map_to_lvlname(size_t addr)
{
const char* names[] = {"(none)", "PGT", "PGD", "PDPT", "PML4"};
return names[map_to_level(addr)];
}
static inline size_t map_to_virt(size_t addr)
{
return canonicalize(addr << (map_to_level(addr) * PAGE_MAP_SHIFT));
}
/*
* Copy page maps using recursion
*
@ -150,10 +149,9 @@ static int copy_page_map(page_map_t *src, page_map_t *dest, int copy)
dest->entries[i] = 0;
else if (src->entries[i] & PG_USER) {
size_t phys;
kprintf("d:%p (%s: 0x%012lx) -> %p\n", &src->entries[i], map_to_lvlname((size_t) &src->entries[i]), map_to_virt((size_t) &src->entries[i]), &dest->entries[i]);
// deep copy user tables
if ((size_t) src >= PAGE_PGT) {
if ((size_t) src >= PAGE_MAP_PGT) {
phys = get_page();
if (BUILTIN_EXPECT(!phys, 0))
return -ENOMEM;
@ -176,10 +174,8 @@ static int copy_page_map(page_map_t *src, page_map_t *dest, int copy)
}
}
// shallow copy kernel only tables
else {
kprintf("s:%p (%s: 0x%012lx) -> %p\n", &src->entries[i], map_to_lvlname((size_t) &src->entries[i]), map_to_virt((size_t) &src->entries[i]), &dest->entries[i]);
else
dest->entries[i] = src->entries[i];
}
}
kputs("r\n");
@ -192,8 +188,8 @@ int create_page_map(task_t* task, int copy)
uint32_t ret;
// fixed mapping for paging structures
page_map_t *current = (page_map_t*) PAGE_PML4;
page_map_t *new = (page_map_t*) (PAGE_PML4 - 0x1000);
page_map_t *current = (page_map_t*) PAGE_MAP_PML4;
page_map_t *new = (page_map_t*) (PAGE_MAP_PML4 - 0x1000);
// get new pml4 table
phys = get_page();
@ -265,20 +261,6 @@ int drop_page_map(void)
#endif
}
size_t virt_to_phys(size_t viraddr)
{
task_t* task = per_core(current_task);
size_t phyaddr;
size_t* pte;
spinlock_irqsave_lock(&task->page_lock);
pte = (size_t *) (PAGE_PGT | (viraddr >> 9));
phyaddr = (*pte & PAGE_MASK) | (viraddr & ~PAGE_MASK);
spinlock_irqsave_unlock(&task->page_lock);
return phyaddr;
}
size_t map_region(size_t viraddr, size_t phyaddr, uint32_t npages, uint32_t flags)
@ -313,7 +295,7 @@ size_t map_region(size_t viraddr, size_t phyaddr, uint32_t npages, uint32_t flag
for(i=0; i<npages; i++, viraddr+=PAGE_SIZE, phyaddr+=PAGE_SIZE) {
// page table entry
size_t* pte = (size_t *) (PAGE_PGT|(viraddr >> 9));
size_t* pte = (size_t *) (PAGE_MAP_PGT|(viraddr >> PAGE_MAP_SHIFT));
if (*pte && !(flags & MAP_REMAP)) {
kprintf("map_region: 0x%lx is already mapped\n", viraddr);
@ -322,9 +304,9 @@ size_t map_region(size_t viraddr, size_t phyaddr, uint32_t npages, uint32_t flag
}
if (flags & MAP_USER_SPACE)
*pte = phyaddr|USER_PAGE;
*pte = phyaddr | USER_PAGE;
else
*pte = phyaddr|KERN_PAGE;
*pte = phyaddr | KERN_PAGE;
if (flags & MAP_NO_CACHE)
*pte |= PG_PCD;
@ -479,7 +461,6 @@ static void pagefault_handler(struct state *s)
{
task_t* task = per_core(current_task);
size_t viraddr = read_cr2();
size_t phyaddr;
#if 0
if ((viraddr >= task->start_heap) && (viraddr <= task->end_heap) && (viraddr > KERNEL_SPACE)) {
@ -497,14 +478,16 @@ static void pagefault_handler(struct state *s)
kprintf("Could not map 0x%x at 0x%x\n", phyaddr, viraddr);
put_page(phyaddr);
}
#endif
/*
* handle missing paging structures for userspace
* all kernel space paging structures have been initialized in entry64.asm
*/
else if (viraddr >= PAGE_PGT) {
kprintf("map_region: missing paging structure at: 0x%lx (%s)\n", viraddr, map_to_lvlname(viraddr));
if (viraddr >= PAGE_MAP_PGT) {
kprintf("map_region: missing paging structure at: 0x%lx (%s)\n", viraddr, page_map_names[entry_to_level(viraddr)]);
phyaddr = get_page();
size_t phyaddr = get_page();
if (BUILTIN_EXPECT(!phyaddr, 0))
goto oom;
@ -519,7 +502,6 @@ static void pagefault_handler(struct state *s)
return;
}
#endif
kprintf("PAGE FAULT: Task %u got page fault at %p (irq %llu, cs:rip 0x%llx:0x%llx)\n", task->id, viraddr, s->int_no, s->cs, s->rip);
kprintf("Register state: rax = 0x%llx, rbx = 0x%llx, rcx = 0x%llx, rdx = 0x%llx, rdi = 0x%llx, rsi = 0x%llx, rbp = 0x%llx, rsp = 0x%llx\n",