/*
 * Copyright 2010 Stefan Lankes, Chair for Operating Systems,
 *                RWTH Aachen University
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * This file is part of MetalSVM.
 */

/**
 * @file arch/x86/include/asm/page.h
 * @brief Definitions and functions related to paging
 * @author Stefan Lankes
 *
 * This file defines the paging interface as well as the structures related to paging.
 */

#ifndef __ARCH_PAGE_H__
#define __ARCH_PAGE_H__

#include <metalsvm/stddef.h>
#include <metalsvm/stdlib.h>

// Bit positions in paging structures
#define _PAGE_BIT_PRESENT 0
#define _PAGE_BIT_RW 1
#define _PAGE_BIT_USER 2
#define _PAGE_BIT_PWT 3
#define _PAGE_BIT_PCD 4
#define _PAGE_BIT_ACCESSED 5
#define _PAGE_BIT_DIRTY 6
#define _PAGE_BIT_PS 7
#define _PAGE_BIT_PAT 7
#define _PAGE_BIT_GLOBAL 8
#define _PAGE_BIT_SVM_STRONG 9
#define _PAGE_BIT_SVM_LAZYRELEASE 10
#define _PAGE_BIT_SVM_INIT 11
#define _PAGE_BIT_XD 63

/// Page is present
#define PG_PRESENT (1 << _PAGE_BIT_PRESENT)
/// Page is readable and writable
#define PG_RW (1 << _PAGE_BIT_RW)
/// Page is addressable from userspace
#define PG_USER (1 << _PAGE_BIT_USER)
/// Page write-through is activated
#define PG_PWT (1 << _PAGE_BIT_PWT)
/// Page cache is disabled
#define PG_PCD (1 << _PAGE_BIT_PCD)
/// Page was recently accessed (set by CPU)
#define PG_ACCESSED (1 << _PAGE_BIT_ACCESSED)
/// Page is dirty due to recent write access (set by CPU)
#define PG_DIRTY (1 << _PAGE_BIT_DIRTY)
/// Page size: 1GB, 4MB or 2MB (depends on paging level and CPU mode)
#define PG_PS (1 << _PAGE_BIT_PS)
/// Page is part of the MPB (SCC-specific entry)
#define PG_MPE PG_PS
/// Global TLB entry (Pentium Pro and later)
#define PG_GLOBAL (1 << _PAGE_BIT_GLOBAL)
/// Page Attribute Table
#define PG_PAT (1 << _PAGE_BIT_PAT)
/// This virtual address range is used by the SVM system and marked for strong consistency
#define PG_SVM PG_SVM_STRONG
#define PG_SVM_STRONG (1 << _PAGE_BIT_SVM_STRONG)
/// This virtual address range is used by the SVM system and marked for lazy release
#define PG_SVM_LAZYRELEASE (1 << _PAGE_BIT_SVM_LAZYRELEASE)
/// Currently, no page frame is behind this page (only the MPB proxy)
#define PG_SVM_INIT (1 << _PAGE_BIT_SVM_INIT)
/// Page is disabled for code execution (64 bit only)
#define PG_XD (1ULL << _PAGE_BIT_XD)

/// This is a whole set of flags (PRESENT, RW, ACCESSED, DIRTY) for kernel-space tables
#define KERN_TABLE (PG_PRESENT|PG_RW|PG_ACCESSED|PG_DIRTY)
/// This is a whole set of flags (PRESENT, RW, ACCESSED, DIRTY, USER) for user-space tables
#define USER_TABLE (PG_PRESENT|PG_RW|PG_ACCESSED|PG_DIRTY|PG_USER)
/// This is a whole set of flags (PRESENT, RW, GLOBAL) for kernel-space pages
#define KERN_PAGE (PG_PRESENT|PG_RW|PG_GLOBAL)
/// This is a whole set of flags (PRESENT, RW, USER) for user-space pages
#define USER_PAGE (PG_PRESENT|PG_RW|PG_USER)

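/*
 * Usage sketch: composing and testing the flag bits of a page map entry.
 * The names `entry` and `phyaddr` below are illustrative only; `phyaddr`
 * is assumed to be a page-aligned physical address.
 *
 *   size_t entry = phyaddr | USER_PAGE;
 *   if (entry & PG_PRESENT) {
 *       // the entry refers to a mapped page frame
 *   }
 *   if (!(entry & PG_USER)) {
 *       // the page is only accessible from kernel space
 *   }
 */
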
#ifdef CONFIG_X86_32
/// On a 32-bit system, each page map structure consists of 1024 entries (= 2^10)
#define MAP_ENTRIES 1024
#elif defined(CONFIG_X86_64)
/// On a 64-bit system, each page map structure consists of 512 entries (= 2^9)
#define MAP_ENTRIES 512
#endif

/** @brief General page map structure
 *
 * This page map structure is a general type for all indirection levels,\n
 * as all page map levels contain the same number of entries.
 */
typedef struct page_map {
	size_t entries[MAP_ENTRIES];
} __attribute__ ((aligned (4096))) page_map_t;

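/*
 * Sketch of how a virtual address is split into page map indices, assuming
 * the usual x86 semantics (4 KiB pages, MAP_ENTRIES slots per level). The
 * helper below is illustrative and not part of this interface.
 *
 *   // Index into the page map at the given level (0 = lowest level).
 *   static inline size_t map_index(size_t viraddr, unsigned int level)
 *   {
 *       unsigned int bits = (MAP_ENTRIES == 512) ? 9 : 10;
 *       return (viraddr >> (12 + level * bits)) & (MAP_ENTRIES - 1);
 *   }
 */
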
/** @brief Converts a virtual address to a physical one
 *
 * @param viraddr Virtual address to convert
 * @return The physical address
 */
size_t virt_to_phys(size_t viraddr);

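/*
 * Usage sketch: determining the page frame that backs an already mapped
 * kernel virtual address. `vaddr` is illustrative only.
 *
 *   size_t paddr = virt_to_phys(vaddr);
 *   size_t frame = paddr & ~(size_t)0xFFF;   // base of the 4 KiB page frame
 */
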
/** @brief Allocates a virtual address space range of npages
 *
 * The allocated range has a size of npages pages and is created
 * with the given page flags (if any).
 *
 * @param npages Size of the range in pages
 * @param flags Further page flags
 *
 * @return The start address of the new range
 */
size_t vm_alloc(uint32_t npages, uint32_t flags);

/** @brief Frees a range in the virtual address space
 *
 * @param addr Address of the range
 * @param npages Size of the range in pages
 *
 * @return
 * - 0 on success
 * - -EINVAL (-22) on failure
 */
int vm_free(size_t addr, uint32_t npages);

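/*
 * Usage sketch: reserving 16 pages of virtual address space and releasing
 * them again. The flags argument is left at 0 here (no special flags);
 * vm_alloc()/vm_free() only manage the virtual range itself.
 *
 *   size_t viraddr = vm_alloc(16, 0);
 *   if (viraddr) {
 *       // ... map page frames into the range and use it ...
 *       vm_free(viraddr, 16);
 *   }
 */
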
/** @brief Unmaps the physical memory at a specific virtual address
 *
 * All page table entries within this range will be marked as not present
 * and, in the case of userspace memory, the page usage counter of the task
 * will be decremented.
 *
 * @param viraddr The range's virtual address
 * @param npages The range's size in pages
 *
 * @return
 * - 0 on success
 * - -EINVAL (-22) on failure
 */
int unmap_region(size_t viraddr, uint32_t npages);

/** @brief Maps a physical memory region to a virtual address
 *
 * Maps a physical memory region to a specific virtual address.
 * If the virtual address is zero, this function allocates a valid virtual address on demand.
 *
 * @param viraddr Desired virtual address
 * @param phyaddr Physical address to map from
 * @param npages The region's size in number of pages
 * @param flags Further page flags
 *
 * @return
 * - The virtual address of the mapped region on success
 * - 0 on failure
 */
size_t map_region(size_t viraddr, size_t phyaddr, uint32_t npages, uint32_t flags);

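/*
 * Usage sketch: mapping a page-aligned physical region into kernel space
 * with caching disabled (e.g. for a memory-mapped device) and unmapping it
 * afterwards. `phyaddr` is an illustrative physical address, and the flag
 * combination assumes the PG_* values above can be passed directly.
 *
 *   size_t viraddr = map_region(0, phyaddr, 4, KERN_PAGE|PG_PCD);
 *   if (viraddr) {
 *       // ... access the region through viraddr ...
 *       unmap_region(viraddr, 4);
 *   }
 */
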
/** @brief Sets up the environment and enables paging
 *
 * - Installs the page fault handler (IRQ handler)
 * - Sets up the page directory and the page tables for kernel space
 *   (identity mapping: virtual address = physical address)
 * - Maps the VGA buffer, the multiboot information and the initrd into kernel space
 * - Writes to the CR0 and CR3 registers
 * - Sets the 'paging_enabled' variable to 1
 * - Registers a kernel thread for task state switching
 *
 * @return
 * - 0 on success
 * - -ENOMEM (-12) on failure
 */
int arch_paging_init(void);

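/*
 * Usage sketch: arch_paging_init() is expected to be called once during
 * early boot, before any code relies on the paging interface. Error
 * handling below is illustrative only.
 *
 *   if (arch_paging_init() != 0) {
 *       // paging could not be set up (-ENOMEM); abort the boot process
 *   }
 */
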
/** @brief Returns the page map of the boot task
 *
 * The boot task's page map is a static array of page_map_t type variables.
 *
 * @return The address of the boot task's page map array
 */
page_map_t* get_boot_page_map(void);

/** @brief Sets up a new page map for a new user-level task
 *
 * @param task Pointer to the task-specific task_t structure
 * @param copy If true, the new page map will be a copy of the kernel's address space page map
 *
 * @return
 * - The number of allocated page tables on success
 * - -ENOMEM (-12) on failure
 */
int create_page_map(task_t* task, int copy);

/** @brief Deletes all page map structures of the current task
 *
 * Returns the PML4, PDPT, PGD and PGT tables to the buffer and
 * sets the task's page map pointer to NULL.
 *
 * @return
 * - 0 on success
 * - -EINVAL (-22) on failure (if the page map is still the boot page map)
 */
int drop_page_map(void);

/** @brief Changes the page permissions in the page tables of the current task
 *
 * Applies the flags given in the 'flags' parameter to the range
 * denoted by the virtual start and end addresses.
 *
 * @param start The range's virtual start address
 * @param end The range's virtual end address
 * @param flags The flags to apply
 *
 * @return
 * - 0 on success
 * - -EINVAL (-22) on failure
 */
int change_page_permissions(size_t start, size_t end, uint32_t flags);

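/*
 * Usage sketch: tightening the permissions of an already mapped range of
 * the current task. `start` and `end` are illustrative, page-aligned
 * virtual addresses; the sketch assumes the PG_* values above are passed
 * directly as the new permission bits.
 *
 *   if (change_page_permissions(start, end, PG_PRESENT) < 0) {
 *       // the permissions could not be changed
 *   }
 */
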
#endif