From 883756d0b4177e951808340a000e56251acb26f1 Mon Sep 17 00:00:00 2001
From: Steffen Vogel
Date: Fri, 28 Nov 2014 02:26:40 +0100
Subject: [PATCH] improved documentation

---
 arch/x86/mm/page.c | 27 ++++++++++++++++-----------
 1 file changed, 16 insertions(+), 11 deletions(-)

diff --git a/arch/x86/mm/page.c b/arch/x86/mm/page.c
index f2de90f..5e9f373 100644
--- a/arch/x86/mm/page.c
+++ b/arch/x86/mm/page.c
@@ -24,6 +24,11 @@
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
+/**
+ * This is a 32/64 bit portable paging implementation for the x86 architecture
+ * using self-referenced page tables.
+ * See http://www.noteblok.net/2014/06/14/bachelor/ for a detailed description.
+ */
 
 #include
 #include
@@ -59,12 +64,10 @@ static size_t * other[PAGE_LEVELS] = {
 	(size_t *) 0xFFFFE000
 };
 
-/** Addresses of child/parent tables */
+/* Addresses of child/parent tables */
 #define CHILD(map, lvl, vpn)	&map[lvl-1][vpn<<PAGE_MAP_BITS]
 #define PARENT(map, lvl, vpn)	&map[lvl+1][vpn>>PAGE_MAP_BITS]
 
-/** @todo Does't handle huge pages for now
- * @todo This will cause a pagefaut if addr isn't mapped! */
 size_t page_virt_to_phys(size_t addr)
 {
 	size_t vpn = addr >> PAGE_BITS; // virtual page number
@@ -81,7 +84,7 @@ int page_map(size_t viraddr, size_t phyaddr, size_t npages, size_t bits)
 	long vpn = viraddr >> PAGE_BITS;
 	long first[PAGE_LEVELS], last[PAGE_LEVELS];
 
-	// calculate index boundaries for page map traversal
+	/* Calculate index boundaries for page map traversal */
 	for (lvl=0; lvl<PAGE_LEVELS; lvl++) {
 		first[lvl] = (vpn) >> (lvl * PAGE_MAP_BITS);
 		last[lvl] = (vpn+npages-1) >> (lvl * PAGE_MAP_BITS);
@@ -89,8 +92,8 @@ int page_map(size_t viraddr, size_t phyaddr, size_t npages, size_t bits)
 
 	spinlock_lock(&kslock);
 
-	/* We start now iterating through the entries
-	 * beginning at the root table (PGD) */
+	/* Start iterating through the entries
+	 * beginning at the root table (PGD or PML4) */
 	for (lvl=PAGE_LEVELS-1; lvl>=0; lvl--) {
 		for (vpn=first[lvl]; vpn<=last[lvl]; vpn++) {
 			if (lvl) { /* PML4, PDPT, PGD */
@@ -135,6 +138,8 @@ int page_unmap(size_t viraddr, size_t npages)
 
 	spinlock_lock(&kslock);
 
+	/* Start iterating through the entries.
+	 * Only the PGT entries are removed. Tables remain allocated. */
 	for (vpn=start; vpn
> PAGE_BITS;
 	page_map(addr, addr, npages, PG_PRESENT | PG_RW | PG_GLOBAL);
 
 #ifdef CONFIG_VGA
-	// map video memory
+	/* Map video memory */
 	page_map(VIDEO_MEM_ADDR, VIDEO_MEM_ADDR, 1, PG_PRESENT | PG_RW | PG_PCD);
 #endif
 
-	// map multiboot information and modules
+	/* Map multiboot information and modules */
 	if (mb_info) {
 		addr = (size_t) mb_info & PAGE_MASK;
 		npages = PAGE_FLOOR(sizeof(*mb_info)) >> PAGE_BITS;
@@ -197,7 +202,7 @@ int page_init()
 		}
 	}
 
-	// unmap all (identity mapped) pages with PG_BOOT flag in first PGT (boot_pgt)
+	/* Unmap bootstrap identity paging (see entry.asm, PG_BOOT) */
 	for (i=0; i
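
Note on the new header comment, not part of the patch itself: "self-referenced page tables" means that one entry of the root table points back at the root table, which is what the self[]/other[] windows and the CHILD/PARENT macros rely on. Below is a minimal, stand-alone sketch of that addressing for 32-bit x86 with 4 KiB pages; it assumes the recursive entry sits in the last PGD slot (index 1023), and the window addresses, macro values and function names are illustrative rather than taken from eduOS.

/* Sketch only: address arithmetic enabled by a self-referencing PGD slot.
 * Assumed layout: recursive entry in PGD slot 1023, so all PGTs appear at
 * 0xFFC00000 and the PGD itself at 0xFFFFF000 (illustrative constants). */
#include <stdint.h>
#include <stdio.h>

#define PAGE_BITS      12
#define PAGE_MAP_BITS  10                  /* 1024 entries per table on 32 bit */
#define PAGE_SIZE      (1u << PAGE_BITS)
#define PAGE_MASK      (~(PAGE_SIZE - 1))  /* keeps the frame bits of an entry */
#define PGT_WINDOW     0xFFC00000u         /* all PGTs, via the recursive slot */
#define PGD_WINDOW     0xFFFFF000u         /* the PGD mapped onto itself */

/* Virtual address of the PGT entry that maps viraddr */
static uint32_t *pgt_entry(uint32_t viraddr)
{
	return (uint32_t *) (uintptr_t) PGT_WINDOW + (viraddr >> PAGE_BITS);
}

/* Virtual address of the PGD entry that maps viraddr */
static uint32_t *pgd_entry(uint32_t viraddr)
{
	return (uint32_t *) (uintptr_t) PGD_WINDOW + (viraddr >> (PAGE_BITS + PAGE_MAP_BITS));
}

/* Translation in the spirit of page_virt_to_phys(): read the PTE through the
 * recursive window and combine frame address and page offset. Reading the
 * entry faults if viraddr is unmapped; huge pages are not handled. */
static uint32_t virt_to_phys(uint32_t viraddr)
{
	uint32_t entry = *pgt_entry(viraddr);          /* PTE: frame | flags */

	return (entry & PAGE_MASK) | (viraddr & ~PAGE_MASK);
}

int main(void)
{
	/* Only prints entry addresses; dereferencing them needs real paging. */
	printf("PGT entry of 0xC0100000: %p\n", (void *) pgt_entry(0xC0100000u));
	printf("PGD entry of 0xC0100000: %p\n", (void *) pgd_entry(0xC0100000u));
	(void) virt_to_phys;
	return 0;
}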
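
A second stand-alone sketch, again illustrative rather than taken from the patch: the index boundaries documented in page_map() are simply the first and last virtual page number of the request, shifted right by lvl * PAGE_MAP_BITS, which tells the traversal which entries to visit at each level. With PAGE_LEVELS = 2 as in the 32-bit configuration:

/* Sketch only: per-level index ranges as computed at the top of page_map(). */
#include <stdio.h>

#define PAGE_BITS      12
#define PAGE_MAP_BITS  10
#define PAGE_LEVELS    2   /* PGD + PGT on 32-bit x86 */

int main(void)
{
	unsigned long viraddr = 0xC0100000ul;  /* example request */
	unsigned long npages  = 1025;          /* deliberately spans two PGTs */
	unsigned long vpn     = viraddr >> PAGE_BITS;
	unsigned long first[PAGE_LEVELS], last[PAGE_LEVELS];
	int lvl;

	for (lvl = 0; lvl < PAGE_LEVELS; lvl++) {
		first[lvl] = vpn >> (lvl * PAGE_MAP_BITS);
		last[lvl]  = (vpn + npages - 1) >> (lvl * PAGE_MAP_BITS);
		/* lvl 0: PGT entries, lvl 1: PGD entries */
		printf("lvl %d: entries %lu..%lu\n", lvl, first[lvl], last[lvl]);
	}
	return 0;
}

Visiting the higher levels first, as the "beginning at the root table (PGD or PML4)" comment describes, presumably lets missing tables be set up before the PGT entries themselves are written.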