From a59cceb15b61b357f8cfadb947e0bf1daae10edf Mon Sep 17 00:00:00 2001
From: Stefan Lankes
Date: Sun, 21 Feb 2016 17:07:00 +0100
Subject: [PATCH] mark new page table entries as accessed => good for the page structure cache

---
 hermit/arch/x86/kernel/entry.asm | 12 ++++++------
 hermit/arch/x86/mm/page.c        | 12 ++++++------
 2 files changed, 12 insertions(+), 12 deletions(-)

diff --git a/hermit/arch/x86/kernel/entry.asm b/hermit/arch/x86/kernel/entry.asm
index f94eeadd9..0712eded1 100644
--- a/hermit/arch/x86/kernel/entry.asm
+++ b/hermit/arch/x86/kernel/entry.asm
@@ -89,17 +89,17 @@ align 4
 ; Bootstrap page tables are used during the initialization.
 align 4096
 boot_pml4:
-    DQ boot_pdpt + 0x7 ; PG_PRESENT | PG_RW | PG_USER
+    DQ boot_pdpt + 0x27 ; PG_PRESENT | PG_RW | PG_USER | PG_ACCESSED
     times 510 DQ 0 ; PAGE_MAP_ENTRIES - 2
-    DQ boot_pml4 + 0x203 ; PG_PRESENT | PG_RW | PG_SELF (self-reference)
+    DQ boot_pml4 + 0x223 ; PG_PRESENT | PG_RW | PG_ACCESSED | PG_SELF (self-reference)
 boot_pdpt:
-    DQ boot_pgd + 0x3 ; PG_PRESENT | PG_RW
+    DQ boot_pgd + 0x23 ; PG_PRESENT | PG_RW | PG_ACCESSED
     times 510 DQ 0 ; PAGE_MAP_ENTRIES - 2
-    DQ boot_pml4 + 0x203 ; PG_PRESENT | PG_RW | PG_SELF (self-reference)
+    DQ boot_pml4 + 0x223 ; PG_PRESENT | PG_RW | PG_ACCESSED | PG_SELF (self-reference)
 boot_pgd:
-    DQ boot_pgt + 0x3 ; PG_PRESENT | PG_RW
+    DQ boot_pgt + 0x23 ; PG_PRESENT | PG_RW | PG_ACCESSED
     times 510 DQ 0 ; PAGE_MAP_ENTRIES - 2
-    DQ boot_pml4 + 0x203 ; PG_PRESENT | PG_RW | PG_SELF (self-reference)
+    DQ boot_pml4 + 0x223 ; PG_PRESENT | PG_RW | PG_ACCESSED | PG_SELF (self-reference)
 boot_pgt:
     times 512 DQ 0
 
diff --git a/hermit/arch/x86/mm/page.c b/hermit/arch/x86/mm/page.c
index d1185ae2f..6f6d848cd 100644
--- a/hermit/arch/x86/mm/page.c
+++ b/hermit/arch/x86/mm/page.c
@@ -142,9 +142,9 @@ int page_map(size_t viraddr, size_t phyaddr, size_t npages, size_t bits)
 
             /* Reference the new table within its parent */
 #if 0
-            self[lvl][vpn] = phyaddr | bits | PG_PRESENT | PG_USER | PG_RW;
+            self[lvl][vpn] = phyaddr | bits | PG_PRESENT | PG_USER | PG_RW | PG_ACCESSED;
 #else
-            self[lvl][vpn] = (phyaddr | bits | PG_PRESENT | PG_USER | PG_RW) & ~PG_XD;
+            self[lvl][vpn] = (phyaddr | bits | PG_PRESENT | PG_USER | PG_RW | PG_ACCESSED) & ~PG_XD;
 #endif
 
             /* Fill new table with zeros */
@@ -158,7 +158,7 @@ int page_map(size_t viraddr, size_t phyaddr, size_t npages, size_t bits)
         if (self[lvl][vpn] & PG_PRESENT)
             flush = 1;
 
-        self[lvl][vpn] = phyaddr | bits | PG_PRESENT;
+        self[lvl][vpn] = phyaddr | bits | PG_PRESENT | PG_ACCESSED;
 
         if (flush)
             /* There's already a page mapped at this address.
@@ -269,11 +269,11 @@ int page_map_copy(task_t *dest)
     dest->page_map |= PG_PRESENT;
 
     spinlock_irqsave_lock(curr_task->page_lock);
-    self[PAGE_LEVELS-1][PAGE_MAP_ENTRIES-2] = dest->page_map | PG_PRESENT | PG_SELF | PG_RW;
+    self[PAGE_LEVELS-1][PAGE_MAP_ENTRIES-2] = dest->page_map | PG_PRESENT | PG_SELF | PG_ACCESSED | PG_RW;
 
     int ret = traverse(PAGE_LEVELS-1, 0);
 
-    other[PAGE_LEVELS-1][PAGE_MAP_ENTRIES-1] = dest->page_map | PG_PRESENT | PG_SELF | PG_RW;
+    other[PAGE_LEVELS-1][PAGE_MAP_ENTRIES-1] = dest->page_map | PG_PRESENT | PG_SELF | PG_ACCESSED | PG_RW;
     self [PAGE_LEVELS-1][PAGE_MAP_ENTRIES-2] = 0;
     spinlock_irqsave_unlock(curr_task->page_lock);
 
@@ -316,7 +316,7 @@ void page_fault_handler(struct state *s)
      * do we have a valid page table entry? => flush TLB and return
      */
     if (check_pagetables(viraddr)) {
-        tlb_flush_one_page(viraddr);
+        //tlb_flush_one_page(viraddr);
         spinlock_irqsave_unlock(task->page_lock);
         return;
     }
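
Note on the flag encodings (reviewer addition, not part of the patch): the hard-coded constants in the bootstrap tables are bitwise ORs of the standard x86-64 page-table bits, so 0x7 -> 0x27 and 0x203 -> 0x223 simply add the accessed bit (bit 5). The standalone C sketch below assumes the usual bit positions from the Intel SDM and takes PG_SELF to be the software-defined bit 9, inferred from the 0x2xx values in entry.asm; it only recomputes the old and new flag words so the edited entries can be checked at a glance.

/* flag_check.c - hypothetical sketch, not part of HermitCore.
 * Bit positions follow the x86-64 paging format; PG_SELF (bit 9) is an
 * assumption based on the 0x203/0x223 values in entry.asm. */
#include <stdio.h>

#define PG_PRESENT  (1ULL << 0)  /* entry is valid */
#define PG_RW       (1ULL << 1)  /* writable */
#define PG_USER     (1ULL << 2)  /* accessible from user mode */
#define PG_ACCESSED (1ULL << 5)  /* normally set by the CPU on first use */
#define PG_SELF     (1ULL << 9)  /* software bit: self-referencing entry */

int main(void)
{
    /* boot_pml4[0]: 0x7 before the patch, 0x27 after */
    printf("0x%llx -> 0x%llx\n",
           PG_PRESENT | PG_RW | PG_USER,
           PG_PRESENT | PG_RW | PG_USER | PG_ACCESSED);

    /* self-reference entries: 0x203 before, 0x223 after */
    printf("0x%llx -> 0x%llx\n",
           PG_PRESENT | PG_RW | PG_SELF,
           PG_PRESENT | PG_RW | PG_SELF | PG_ACCESSED);
    return 0;
}

The motivation matches the commit subject: if the accessed bit is already set when an entry is created, the CPU does not have to write it back the first time the entry is used for a translation, which is presumably what "good for the page structure cache" refers to.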