/*
 * Copyright 2010 Stefan Lankes, Chair for Operating Systems,
 *                RWTH Aachen University
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * This file is part of MetalSVM.
 */

/**
 * @file arch/x86/include/asm/page.h
 * @brief Definitions and functions related to paging
 * @author Stefan Lankes
 * @author Steffen Vogel
 *
 * This file defines the interface for paging and the structures related to paging.
 */

#ifndef __ARCH_PAGE_H__
#define __ARCH_PAGE_H__

#include <metalsvm/stddef.h>
#include <metalsvm/stdlib.h>

/// Page offset bits
#define PAGE_BITS		12

#ifdef CONFIG_X86_32
/// Number of page map indirections
#define PAGE_MAP_LEVELS		2
/// Page map bits
#define PAGE_MAP_BITS		10
/// Total operand width in bits
#define BITS			32
/// Linear/virtual address width
#define VIRT_BITS		BITS
/// Physical address width (we don't support PAE)
#define PHYS_BITS		BITS
#elif defined(CONFIG_X86_64)
/// Number of page map indirections
#define PAGE_MAP_LEVELS		4
/// Page map bits
#define PAGE_MAP_BITS		9
/// Total operand width in bits
#define BITS			64
/// Linear/virtual address width
#define VIRT_BITS		48
/// Physical address width (maximum value)
#define PHYS_BITS		52
#endif

/// The size of a single page in bytes
#define PAGE_SIZE		(1L << PAGE_BITS)
/// The number of entries in a page map table
#define PAGE_MAP_ENTRIES	(1L << PAGE_MAP_BITS)
/// Mask the page address
#define PAGE_MASK		(-1L << PAGE_BITS)
/// Mask the entry in a page table
#define PAGE_ENTRY_MASK		(-1L << (PAGE_BITS-PAGE_MAP_BITS))
/// Mask for all flag bits in a page map entry (including ignored bits)
#define PAGE_FLAGS_MASK		(~(-1L << PAGE_BITS) | (-1L << VIRT_BITS))

/// Align an address to the next page boundary (round up)
#define PAGE_FLOOR(addr)	(((addr) + PAGE_SIZE - 1) & PAGE_MASK)
/// Align an address to the current page boundary (round down)
#define PAGE_CEIL(addr)		((addr) & PAGE_MASK)

/// Sign extension to get a valid canonical address (hack: by using arithmetic shifts)
#define VIRT_SEXT(addr)		((ssize_t) (addr) << (BITS-VIRT_BITS) >> (BITS-VIRT_BITS))
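/*
 * Worked example (illustrative only, assuming the x86_64 configuration
 * above, i.e. PAGE_BITS = 12 and VIRT_BITS = 48):
 *
 *   PAGE_CEIL(0x100123)  == 0x100000   // round down to the page boundary
 *   PAGE_FLOOR(0x100123) == 0x101000   // round up to the next page boundary
 *
 *   // Bit 47 is set, so the arithmetic shifts fill the upper 16 bits
 *   // and yield a canonical address:
 *   VIRT_SEXT(0x0000800000000000) == 0xFFFF800000000000
 */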
// base addresses of the page map tables
#ifdef CONFIG_X86_32
#define PAGE_MAP_PGD		0xFFFFF000
#define PAGE_MAP_PGT		0xFFC00000
#elif defined(CONFIG_X86_64)
#define PAGE_MAP_PML4		0xFFFFFFFFFFFFF000
#define PAGE_MAP_PDPT		0xFFFFFFFFFFE00000
#define PAGE_MAP_PGD		0xFFFFFFFFC0000000
#define PAGE_MAP_PGT		0xFFFFFF8000000000
#endif

/// Page is present
#define PG_PRESENT		(1 << 0)
/// Page is read- and writable
#define PG_RW			(1 << 1)
/// Page is addressable from userspace
#define PG_USER			(1 << 2)
/// Page write through is activated
#define PG_PWT			(1 << 3)
/// Page cache is disabled
#define PG_PCD			(1 << 4)
/// Page was recently accessed (set by CPU)
#define PG_ACCESSED		(1 << 5)
/// Page is dirty due to recent write access (set by CPU)
#define PG_DIRTY		(1 << 6)
/// Huge page: 4MB (or 2MB, 1GB)
#define PG_PSE			(1 << 7)
/// Page is part of the MPB (SCC specific entry)
#define PG_MPE			PG_PSE
/// Page attribute table
#define PG_PAT			PG_PSE
/// Global TLB entry (Pentium Pro and later)
#define PG_GLOBAL		(1 << 8)
/// This virtual address range is marked as used by the SVM system
#define PG_SVM			(1 << 9)
#define PG_SVM_STRONG		PG_SVM
/// This virtual address range is marked as used by the SVM system (lazy release)
#define PG_SVM_LAZYRELEASE	(1 << 10)
/// Currently, no page frame is behind this page (only the MPB proxy)
#define PG_SVM_INIT		(1 << 11)
/// Disable execution for this page
#define PG_XD			(1L << 63)

/// This is a whole set of flags (PRESENT, RW, XD) for kernelspace tables
#define PG_TABLE		(PG_PRESENT|PG_RW|PG_XD)
/// This is a whole set of flags (PRESENT, RW, GLOBAL, XD) for kernelspace pages
#define PG_PAGE			(PG_PRESENT|PG_RW|PG_GLOBAL|PG_XD)

/** @brief A single entry in a page map */
typedef size_t page_entry_t;

/** @brief General page map structure
 *
 * This page map structure is a general type for all indirection levels,
 * as all page map levels contain the same number of entries.
 * All page maps must be page aligned!
 */
typedef struct page_map {
	page_entry_t entries[PAGE_MAP_ENTRIES];
} __attribute__ ((aligned (PAGE_SIZE))) page_map_t;

/** @brief A callback type for the page map iterator
 *
 * @param entry A pointer to the current page map entry
 * @param level The current indirection level
 * @return
 * - 0 if we want to skip the underlying page tables
 * - >0 if we want to recurse into the underlying page tables
 */
typedef int (*page_cb_t)(page_entry_t* entry, int level);

/** @brief Convert a virtual address to a physical one
 *
 * @param viraddr Virtual address to convert
 * @return The physical address
 */
size_t virt_to_phys(size_t viraddr);

/** @brief Allocate a virtual address space range of npages
 *
 * The allocated range will have the size of npages pages and carries the
 * given flags (if any).
 *
 * @param npages The range size in pages
 * @param flags Further page flags
 *
 * @return The new range's address
 */
size_t vm_alloc(uint32_t npages, uint32_t flags);

/** @brief Free a range in the virtual address space
 *
 * @param addr Address of the range
 * @param npages Size of the range in pages
 *
 * @return
 * - 0 on success
 * - -EINVAL (-22) on failure
 */
int vm_free(size_t addr, uint32_t npages);

/** @brief Unmap the physical memory at a specific virtual address
 *
 * All page table entries within this range will be marked as not present
 * and (in the case of userspace memory) the page usage counter of the task
 * will be decremented.
 *
 * @param viraddr The range's virtual address
 * @param npages The range's size in pages
 *
 * @return
 * - 0 on success
 * - -EINVAL (-22) on failure
 */
int unmap_region(size_t viraddr, uint32_t npages);

/** @brief Map a physical memory region to a virtual address
 *
 * Maps a physical memory region to a specific virtual address.
 * If the virtual address is zero, this function allocates a valid virtual address on demand.
 *
 * @param viraddr Desired virtual address
 * @param phyaddr Physical address to map from
 * @param npages The region's size in number of pages
 * @param flags Further page flags
 *
 * @return
 * - A virtual address on success
 * - 0 on failure
 */
size_t map_region(size_t viraddr, size_t phyaddr, uint32_t npages, uint32_t flags);

/** @brief Set up the environment and enable paging
 *
 * - Installs the page fault handler IRQ
 * - Sets up the whole page directory and page tables for the kernel space (virtual address = physical address)
 * - Maps VGA, multiboot info and initrd into kernel space
 * - Writes to the cr0 and cr3 registers
 * - Sets the 'paging_enabled' variable to 1
 * - Registers a kernel thread for task state switching
 *
 * @return
 * - 0 on success
 * - -ENOMEM (-12) on failure
 */
int arch_paging_init(void);
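/*
 * Usage sketch (illustrative only, not part of the original interface):
 * mapping one page of a physical region into the kernel address space and
 * releasing it again. The physical address 0xB8000 (VGA text buffer) and
 * the flag combination are merely example values.
 *
 *   size_t viraddr = map_region(0, 0xB8000, 1, PG_RW|PG_PCD);
 *   if (!viraddr)
 *           return -ENOMEM;                    // 0 signals failure
 *
 *   size_t phyaddr = virt_to_phys(viraddr);    // resolves back to 0xB8000
 *
 *   unmap_region(viraddr, 1);                  // mark the page as not present again
 */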
/** @brief Return the page map of the boot task
 *
 * The boot task's page map is a statically allocated structure of type page_map_t.
 *
 * @return The address of the boot task's page map
 */
page_map_t* get_boot_page_map(void);

/** @brief Set up a new page map for a new user-level task
 *
 * @param task Pointer to the task-specific task_t structure
 * @param copy If true: copy userspace pages and tables
 *
 * @return
 * - the number of allocated page tables on success
 * - -ENOMEM (-12) on failure
 */
int copy_page_map(struct task* task, int copy);

/** @brief Delete all page map structures of the current task
 *
 * Puts the PML4, PDPT, PGD and PGT tables back into the buffer and
 * sets the task's page map pointer to NULL.
 *
 * @return
 * - 0 on success
 * - -EINVAL (-22) on failure (in case the page map is still the boot page map)
 */
int drop_page_map(void);

/** @brief Change the page permissions in the page tables of the current task
 *
 * Applies the flags given in the 'flags' parameter to
 * the range denoted by the virtual start and end addresses.
 *
 * @param start The range's virtual start address
 * @param end The range's virtual end address
 * @param flags The flags to apply
 *
 * @return
 * - 0 on success
 * - -EINVAL (-22) on failure
 */
int change_page_permissions(size_t start, size_t end, uint32_t flags);

/** @brief Dump the mapped memory in the given range
 *
 * @param start The range's virtual start address
 * @param end The range's virtual end address
 */
void page_dump(size_t start, size_t end);

/** @brief Print statistics about the page flags in the given range
 *
 * @param start The range's virtual start address
 * @param end The range's virtual end address
 * @param reset Reset the accessed and dirty bits in the page tables
 */
void page_stats(size_t start, size_t end, int reset);

#endif
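/*
 * Usage sketch (illustrative only, not part of the original interface): how
 * the per-task page map functions above are typically sequenced. The `child`
 * task pointer and the error handling are hypothetical.
 *
 *   // fork-like path: set up the address space for `child`,
 *   // copying the parent's userspace pages and tables
 *   int ret = copy_page_map(child, 1);
 *   if (ret < 0)
 *           return ret;                 // -ENOMEM on failure
 *
 *   // exit path of the current task: release its page map hierarchy;
 *   // fails with -EINVAL if the task still uses the boot page map
 *   drop_page_map();
 */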