/*
* Copyright 2010 Stefan Lankes, Chair for Operating Systems,
* RWTH Aachen University
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* This file is part of MetalSVM.
*/
/**
* @file arch/x86/include/asm/page.h
* @brief Definitions and functions related to paging
* @author Stefan Lankes
* @author Steffen Vogel <steffen.vogel@rwth-aachen.de>
*
* This file defines the interface for paging as well as structures related to paging.
*/
#ifndef __ARCH_PAGE_H__
#define __ARCH_PAGE_H__
#include <metalsvm/stddef.h>
#include <metalsvm/stdlib.h>
/// Page offset bits
#define PAGE_BITS 12
/// The size of a single page in bytes
#define PAGE_SIZE (1L << PAGE_BITS)
#ifdef CONFIG_X86_32
/// Total operand width in bits
#define BITS 32
/// Linear/virtual address width
#define VIRT_BITS BITS
/// Physical address width (we don't support PAE)
#define PHYS_BITS BITS
/// Page map bits
#define PAGE_MAP_BITS 10
/// Number of page map indirections
#define PAGE_MAP_LEVELS 2
/// Mask the page address without page map flags
#define PAGE_MASK 0xFFFFF000
#elif defined(CONFIG_X86_64)
/// Total operand width in bits
#define BITS 64
/// Linear/virtual address width
#define VIRT_BITS 48
/// Physical address width (maximum value)
#define PHYS_BITS 52
/// Page map bits
#define PAGE_MAP_BITS 9
/// Number of page map indirections
#define PAGE_MAP_LEVELS 4
/// Mask the page address without page map flags
#define PAGE_MASK 0x000FFFFFFFFFF000
#endif
/// The number of entries in a page map table
#define PAGE_MAP_ENTRIES (1L << PAGE_MAP_BITS)
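/*
* Illustrative sketch (not part of the original interface): with the constants
* above, a virtual address decomposes into PAGE_MAP_LEVELS indices of
* PAGE_MAP_BITS each, on top of the PAGE_BITS page offset. The helper name
* page_map_index() is hypothetical.
*/
static inline size_t page_map_index(size_t viraddr, int level)
{
	/* level 0 selects the lowest page table, higher levels the upper maps */
	return (viraddr >> (PAGE_BITS + level * PAGE_MAP_BITS)) & (PAGE_MAP_ENTRIES - 1);
}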
// Base addresses of the self-mapped pagetables
#ifdef CONFIG_X86_32
#define PAGE_MAP_PGD 0xFFFFF000
#define PAGE_MAP_PGT 0xFFC00000
#elif defined(CONFIG_X86_64)
#define PAGE_MAP_PML4 0xFFFFFFFFFFFFF000
#define PAGE_MAP_PDPT 0xFFFFFFFFFFE00000
#define PAGE_MAP_PGD 0xFFFFFFFFC0000000
#define PAGE_MAP_PGT 0xFFFFFF8000000000
#endif
/// Round an address up to the next page boundary
#define PAGE_FLOOR(addr) (((addr) + PAGE_SIZE - 1) & PAGE_MASK)
/// Round an address down to its page boundary
#define PAGE_CEIL(addr) ( (addr) & PAGE_MASK)
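/*
* Worked example (illustrative, assuming PAGE_SIZE == 4096):
*   PAGE_FLOOR(0x1234) == 0x2000   (rounded up to the next page boundary)
*   PAGE_CEIL(0x1234)  == 0x1000   (rounded down to the start of its page)
*/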
// Canonical address format
#ifdef CONFIG_X86_32
#define CANONICAL(addr) (addr)
#elif defined(CONFIG_X86_64)
#define CANONICAL(addr) sign_extend(addr, VIRT_BITS)
#endif
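/*
* Sketch of the idea behind CANONICAL() on x86_64 (the actual sign_extend()
* is provided elsewhere): bit VIRT_BITS-1 (bit 47) is copied into the upper
* bits so that the result is a canonical 64-bit address, roughly
*   ((ssize_t)addr << (BITS - VIRT_BITS)) >> (BITS - VIRT_BITS)
*/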
/// Page is present
#define PG_PRESENT (1 << 0)
/// Page is read- and writable
#define PG_RW (1 << 1)
/// Page is addressable from userspace
#define PG_USER (1 << 2)
/// Page write through is activated
#define PG_PWT (1 << 3)
/// Page cache is disabled
#define PG_PCD (1 << 4)
/// Page was recently accessed (set by CPU)
#define PG_ACCESSED (1 << 5)
/// Page is dirty due to recent write access (set by CPU)
#define PG_DIRTY (1 << 6)
/// Huge page: 4MB (or 2MB, 1GB)
#define PG_PSE (1 << 7)
/// Page is part of the MPB (SCC specific entry)
#define PG_MPE PG_PSE
/// Page attribute table
#define PG_PAT PG_PSE
/// Global TLB entry (Pentium Pro and later)
#define PG_GLOBAL (1 << 8)
/// This virtual address range is marked as used by the SVM system
#define PG_SVM (1 << 9)
#define PG_SVM_STRONG PG_SVM
/// This virtual address range is marked as used by the SVM system with lazy release
#define PG_SVM_LAZYRELEASE (1 << 10)
/// Currently, no page frame is behind this page (only the MPB proxy)
#define PG_SVM_INIT (1 << 11)
/// Disable execution for this page
#define PG_XD (1L << 63)
/// This is a whole set of flags (PRESENT, RW, XD) for kernelspace tables
#define PG_TABLE (PG_PRESENT|PG_RW|PG_XD)
/// This is a whole set of flags (PRESENT, RW, GLOBAL, XD) for kernelspace pages
#define PG_PAGE (PG_PRESENT|PG_RW|PG_GLOBAL|PG_XD)
/** @brief A single entry in a page map
*
* Usually used as a pointer to a mapped page map entry.
*/
typedef size_t page_entry_t;
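/*
* Illustrative sketch (hypothetical helper, not part of this header): a page
* map entry combines a page-aligned physical address with the PG_* flags above.
*/
static inline page_entry_t make_page_entry(size_t phyaddr, size_t flags)
{
	return (page_entry_t) ((phyaddr & PAGE_MASK) | flags);
}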
/** @brief Converts a virtual address to a physical
*
* @param viraddr Virtual address to convert
* @return physical address
*/
size_t virt_to_phys(size_t viraddr);
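/*
* Usage sketch (illustrative): resolve the physical address backing a kernel
* buffer, e.g. before handing it to a DMA-capable device:
*   size_t phys = virt_to_phys((size_t) buffer);
*/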
/** @brief Unmap the physical memory at a specific virtual address
*
* All page table entries within this range will be marked as not present
* and (in the case of userspace memory) the page usage of the task will be decremented.
*
* @param viraddr The range's virtual address
* @param npages The range's size in pages
*
* @return
* - 0 on success
* - -EINVAL (-22) on failure.
*/
int unmap_region(size_t viraddr, uint32_t npages);
/** @brief Map a physical memory region to a virtual address
*
* Maps a physical memory region to a specific virtual address.
* If the virtual address is zero, this function allocates a valid virtual address on demand.
*
* @param viraddr Desired virtual address
* @param phyaddr Physical address to map from
* @param npages The region's size in number of pages
* @param flags Further page flags
*
* @return
* - A virtual address on success
* - 0 on failure.
*/
size_t map_region(size_t viraddr, size_t phyaddr, uint32_t npages, uint32_t flags);
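/*
* Usage sketch (illustrative only, 0xB8000 is just an example): map one page
* of a device region uncached, use it, and unmap it again:
*   size_t vga = map_region(0, 0xB8000, 1, PG_PRESENT | PG_RW | PG_PCD);
*   if (vga)
*       unmap_region(vga, 1);
*/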
/** @brief Sets up the environment and enables paging.
*
* - Installs the page fault handler (IRQ)
* - Sets up the whole page directory and page tables for the kernel space (virtual address = physical address)
* - Maps VGA memory, the multiboot info and the initrd into kernel space
* - Writes to the cr0 and cr3 registers
* - Sets the 'paging_enabled' variable to 1
* - Registers a kernel thread for task state switching
*
* @return
* - 0 on success
* - -ENOMEM (-12) on failure
*/
int arch_paging_init(void);
/** @brief Returns the page map of the boot task
*
* The boot task's page map is a static array of page_entry_t entries.
*
* @return The address of the boot task's page map.
*/
page_entry_t* get_boot_page_map(void);
/** @brief Set up a new page map for a new user-level task
*
* @param task Pointer to the task-specific task_t structure
* @param copy If true: copy userspace pages and tables
*
* @return
* - the number of allocated page tables
* - -ENOMEM (-12) on failure
*/
int copy_page_map(struct task* task, int copy);
/** @brief Deletes all user page map structures of the current task
*
* All allocated physical page frames are released in the bitmap and
* the task's page_map is replaced by the boot page map.
*
* @return
* - 0 on success
* - -EINVAL (-22) on failure (in case PGD is still the boot-pgd).
*/
int drop_page_map(void);
/** @brief Change the page permissions in the page tables of the current task
*
* Applies the flags given in the 'flags' parameter to
* the range of npages pages starting at the virtual address viraddr.
*
* @param viraddr The range's virtual start address
* @param npages The range's size in pages
* @param flags The flags to apply
*
* @return
* - 0 on success
* - -EINVAL (-22) on failure.
*/
int set_page_flags(size_t viraddr, uint32_t npages, int flags);
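/*
* Usage sketch (illustrative, assuming write access is revoked by omitting
* PG_RW from the new flags):
*   set_page_flags(viraddr, npages, PG_PRESENT | PG_GLOBAL);
*/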
/** @brief Dump mapped memory
*
* @param mask Only watch for changes in these page flags (PG_PRESENT is set by default)
*/
void page_dump(size_t mask);
/** @brief Print stats about page flags
*
* @param reset Reset accessed and dirty bits in page tables
*/
void page_stats(int reset);
#endif