added NX bit support

renamed some CPP macros
This commit is contained in:
Steffen Vogel 2014-01-09 12:49:04 +01:00
parent c21ea42058
commit 066e29fde9
2 changed files with 79 additions and 47 deletions

View file

@ -33,13 +33,13 @@
#include <metalsvm/stdlib.h>
/// Page offset bits
#define PAGE_SHIFT 12
#define PAGE_BITS 12
#ifdef CONFIG_X86_32
/// Number of page map indirections
#define PAGE_MAP_LEVELS 2
/// Page map bits
#define PAGE_MAP_SHIFT 10
#define PAGE_MAP_BITS 10
/// Total operand width in bits
#define BITS 32
/// Linear/virtual address width
@ -50,7 +50,7 @@
/// Number of page map indirections
#define PAGE_MAP_LEVELS 4
/// Page map bits
#define PAGE_MAP_SHIFT 9
#define PAGE_MAP_BITS 9
/// Total operand width in bits
#define BITS 64
/// Linear/virtual address width
@ -60,13 +60,15 @@
#endif
/// The size of a single page in bytes
#define PAGE_SIZE ( 1L << PAGE_SHIFT)
#define PAGE_SIZE ( 1L << PAGE_BITS)
/// The number of entries in a page map table
#define PAGE_MAP_ENTRIES ( 1L << PAGE_MAP_SHIFT)
#define PAGE_MAP_ENTRIES ( 1L << PAGE_MAP_BITS)
/// Mask the page address
#define PAGE_MASK (-1L << PAGE_SHIFT)
#define PAGE_MASK (-1L << PAGE_BITS)
/// Mask the entry in a page table
#define PAGE_ENTRY_MASK (-1L << (PAGE_SHIFT-PAGE_MAP_SHIFT))
#define PAGE_ENTRY_MASK (-1L << (PAGE_BITS-PAGE_MAP_BITS))
/// Mask for all flag bits in a page map entry (including ignored bits)
#define PAGE_FLAGS_MASK (~(-1L << PAGE_BITS) | (-1L << VIRT_BITS))
/// Align to next page
#define PAGE_FLOOR(addr) (((addr) + PAGE_SIZE - 1) & PAGE_MASK)
@ -100,14 +102,14 @@
#define PG_ACCESSED (1 << 5)
/// Page is dirty due to recent write-access (set by CPU)
#define PG_DIRTY (1 << 6)
/// Big page: 4MB (or 2MB)
/// Huge page: 4MB (or 2MB, 1GB)
#define PG_PSE (1 << 7)
/// Page is part of the MPB (SCC specific entry)
#define PG_MPE PG_PSE
/// Page attribute table
#define PG_PAT PG_PSE
/// Global TLB entry (Pentium Pro and later)
#define PG_GLOBAL (1 << 8)
/// Page attribute table flag (PAT, bit 7 in 4 KiB page entries)
#define PG_PAT (1 << 7)
/// This virtual address range is used by SVM system as marked
#define PG_SVM (1 << 9)
#define PG_SVM_STRONG PG_SVM
@ -115,6 +117,8 @@
#define PG_SVM_LAZYRELEASE (1 << 10)
/// Currently, no page frame is behind this page (only the MPB proxy)
#define PG_SVM_INIT (1 << 11)
/// Disable execution for this page
#define PG_XD (1L << 63)
/// This is a whole set of flags (PRESENT,RW,ACCESSED,DIRTY) for kernelspace tables
#define KERN_TABLE (PG_PRESENT|PG_RW|PG_ACCESSED|PG_DIRTY)
@ -146,6 +150,16 @@ typedef struct page_map {
*/
typedef int (*page_cb_t)(page_entry_t* entry, int level);
/** @brief Get the corresponding page map entry to a given virtual address
 *
 * @param addr Virtual address to resolve
 * @param level Page map level to fetch the entry from (0 = lowest-level page table)
 * @return Pointer to the page map entry covering @p addr at @p level
 */
// OR-ing with (-1L << VIRT_BITS) sets the high address bits, steering the
// lookup into what is presumably the recursive page-table self-mapping
// region (TODO confirm against the paging setup). Each extra level shifts
// away one more PAGE_MAP_BITS-sized index; & ~0x7 aligns down to the
// 8-byte entry size.
static inline page_entry_t* virt_to_entry(size_t addr, int level) {
return (page_entry_t*) ((((ssize_t) addr | (-1L << VIRT_BITS)) >> ((level+1) * PAGE_MAP_BITS)) & ~0x7);
}
/** @brief Get the corresponding virtual address to a page map entry
 *
 * Inverse of virt_to_entry(): shifts the entry's address left by one
 * PAGE_MAP_BITS-sized index per level and sign-extends the result via
 * VIRT_SEXT to obtain a canonical virtual address.
 *
 * @param entry Pointer to a page map entry (inside the mapped page tables)
 * @param level Page map level the entry belongs to
 * @return First virtual address mapped by @p entry
 */
static inline size_t entry_to_virt(page_entry_t* entry, int level) {
return VIRT_SEXT((size_t) entry << ((level+1) * PAGE_MAP_BITS));
}
/** @brief Converts a virtual address to a physical
*
* @param viraddr Virtual address to convert

View file

@ -56,23 +56,13 @@ page_map_t* get_boot_page_map(void)
return &boot_pml4;
}
/** @brief Get the corresponding page map entry to a given virtual address */
// Removed in this commit: pre-rename version using PAGE_MAP_SHIFT; replaced
// by the static inline variant in the header that uses PAGE_MAP_BITS.
static page_entry_t* virt_to_entry(size_t addr, int level) {
return (page_entry_t*) ((((ssize_t) addr | (-1L << VIRT_BITS)) >> ((level+1) * PAGE_MAP_SHIFT)) & ~0x7);
}
/** @brief Get the corresponding virtual address to a page map entry */
// Removed in this commit: pre-rename version using PAGE_MAP_SHIFT; replaced
// by the static inline variant in the header that uses PAGE_MAP_BITS.
static size_t entry_to_virt(page_entry_t* entry, int level) {
return VIRT_SEXT((size_t) entry << ((level+1) * PAGE_MAP_SHIFT));
}
size_t virt_to_phys(size_t viraddr) {
task_t* task = per_core(current_task);
spinlock_irqsave_lock(&task->page_lock);
size_t* entry = (size_t*) (PAGE_MAP_PGT | (viraddr >> 9));
size_t phyaddr = (*entry & PAGE_MASK) | (viraddr & ~PAGE_MASK);
size_t phyaddr = (*entry & ~PAGE_FLAGS_MASK) | (viraddr & ~PAGE_MASK);
spinlock_irqsave_unlock(&task->page_lock);
@ -161,20 +151,21 @@ int page_iterate(size_t start, size_t end, page_cb_t pre, page_cb_t post)
void page_dump(size_t from, size_t to)
{
int flags = 0;
task_t* task = per_core(current_task);
size_t flags = 0;
size_t start = 0;
void print(size_t start, size_t end, int flags) {
void print(size_t start, size_t end, size_t flags) {
size_t size = end - start;
kprintf("%#018lx-%#018lx %#14x %c%c%c%c%c (%#x)\n", start, end, size,
//(flags & PG_XD) ? '-' : 'x',
kprintf("%#018lx-%#018lx %#14x %c%c%c%c%c%c\n", start, end, size,
(flags & PG_XD) ? '-' : 'x',
(flags & PG_GLOBAL) ? 'g' : '-',
(flags & PG_DIRTY) ? 'd' : '-',
(flags & PG_ACCESSED) ? 'a' : '-',
(flags & PG_USER) ? 'u' : '-',
(flags & PG_RW) ? 'w' : '-',
flags
(flags & PG_RW) ? 'w' : '-'
);
}
@ -184,14 +175,14 @@ void page_dump(size_t from, size_t to)
if (*entry & PG_PRESENT) {
if (!level || (*entry & PG_PSE)) {
if (!flags) {
flags = *entry & ~PAGE_MASK;
flags = *entry & PAGE_FLAGS_MASK;
start = entry_to_virt(entry, level);
}
else if (flags != (*entry & ~PAGE_MASK)) {
else if (flags != (*entry & PAGE_FLAGS_MASK)) {
end = entry_to_virt(entry, level);
print(start, end, flags);
start = end;
flags = *entry & ~PAGE_MASK;
flags = *entry & PAGE_FLAGS_MASK;
}
}
}
@ -204,31 +195,50 @@ void page_dump(size_t from, size_t to)
return 0;
}
kprintf("%-18s-%18s %14s %-5s %s\n", "start", "end", "size", "flags", "(hex)"); // header
// lock tables
spinlock_lock(&kslock);
spinlock_irqsave_lock(&task->page_lock);
kprintf("%-18s-%18s %14s %-6s\n", "start", "end", "size", "flags"); // header
page_iterate(from, to, cb, NULL);
// unlock tables
spinlock_unlock(&kslock);
spinlock_irqsave_unlock(&task->page_lock);
// workaround to print last mapping
if (flags) print(start, PAGE_FLOOR(to), flags);
if (flags)
print(start, PAGE_FLOOR(to), flags);
}
// TODO: add NX bit
void page_stats(size_t from, size_t to, int reset)
{
int i, stats[PAGE_SHIFT] = { 0 };
const char* labels[] = { "present", "writable", "user accessable", "write through", "cache disabled",
"accessed", "dirty", "huge pages", "global", "svm", "svm lazy", "svm init" };
task_t* task = per_core(current_task);
int i, stats[13] = { 0 };
const char* labels[] = { [0] = "present", "writable", "user accessable", "write through", "cache disabled", // IA-32 "legacy" bits
"accessed", "dirty", "huge pages", "global", "svm", "svm lazy", "svm init",
[12] = "exec disabled" // IA-32e / PAE bits
};
int cb(page_entry_t* entry, int level) {
if (*entry & PG_PRESENT) {
if (!level || (*entry & PG_PSE)) {
// increment stat counters
for (i=0; i<PAGE_SHIFT; i++)
stats[i] += (*entry & (1 << i)) ? 1 : 0;
int i;
for (i=0; i<12; i++) { // IA-32 "legacy" bits
if (*entry & (1 << i))
stats[i]++;
}
for (i=0; i<1; i++) { // IA-32e / PAE bits
if (*entry & (1 << 63-i))
stats[i+PAGE_BITS]++;
}
}
// reset accessed and dirty bits
if (reset) {
*entry &= ~(PG_ACCESSED | PG_DIRTY);
*entry &= ~(PG_ACCESSED|PG_DIRTY);
tlb_flush_one_page(entry_to_virt(entry, level)); // see IA32 Vol3 4.8
}
}
@ -236,10 +246,18 @@ void page_stats(size_t from, size_t to, int reset)
return 0;
}
// lock tables
spinlock_lock(&kslock);
spinlock_irqsave_lock(&task->page_lock);
page_iterate(from, to, cb, NULL);
// unlock tables
spinlock_unlock(&kslock);
spinlock_irqsave_unlock(&task->page_lock);
kprintf("total pages:\n");
for (i=0; i<PAGE_SHIFT; i++)
for (i=0; i<13; i++)
kprintf(" - %s:%*lu\n", labels[i], 25-strlen(labels[i]), stats[i]);
}
@ -689,7 +707,7 @@ int arch_paging_init(void)
#if MAX_CORES > 1
// reserve page for smp boot code
if (!map_region(SMP_SETUP_ADDR, SMP_SETUP_ADDR, 1, MAP_KERNEL_SPACE|MAP_NO_CACHE)) {
if (!map_region(SMP_SETUP_ADDR, SMP_SETUP_ADDR, 1, MAP_NO_CACHE | MAP_REMAP)) {
kputs("could not reserve page for smp boot code\n");
return -ENOMEM;
}
@ -707,7 +725,7 @@ int arch_paging_init(void)
npages = mmap->len / PAGE_SIZE;
if ((mmap->addr+mmap->len) % PAGE_SIZE)
npages++;
map_region(mmap->addr, mmap->addr, npages, MAP_KERNEL_SPACE|MAP_NO_CACHE);
map_region(mmap->addr, mmap->addr, npages, MAP_NO_CACHE | MAP_REMAP);
}
mmap++;
}
@ -720,15 +738,15 @@ int arch_paging_init(void)
*/
if (mb_info && (mb_info->flags & MULTIBOOT_INFO_MODS)) {
multiboot_module_t* mmodule = (multiboot_module_t*) ((size_t) mb_info->mods_addr);
npages = PAGE_FLOOR(mb_info->mods_count*sizeof(multiboot_module_t)) >> PAGE_SHIFT;
npages = PAGE_FLOOR(mb_info->mods_count*sizeof(multiboot_module_t)) >> PAGE_BITS;
map_region((size_t) mmodule, (size_t) mmodule, npages, MAP_REMAP|MAP_KERNEL_SPACE);
map_region((size_t) mmodule, (size_t) mmodule, npages, MAP_REMAP);
for(i=0; i<mb_info->mods_count; i++, mmodule++) {
// map physical address to the same virtual address
npages = PAGE_FLOOR(mmodule->mod_end - mmodule->mod_start) >> PAGE_SHIFT;
kprintf("Map module %s at 0x%x (%u pages)\n", (char*)(size_t) mmodule->cmdline, mmodule->mod_start, npages);
map_region((size_t) (mmodule->mod_start), (size_t) (mmodule->mod_start), npages, MAP_REMAP|MAP_KERNEL_SPACE);
npages = PAGE_FLOOR(mmodule->mod_end - mmodule->mod_start) >> PAGE_BITS;
kprintf("Map module %s at %#x (%u pages)\n", (char*)(size_t) mmodule->cmdline, mmodule->mod_start, npages);
map_region((size_t) (mmodule->mod_start), (size_t) (mmodule->mod_start), npages, MAP_REMAP);
}
}
#endif