/*
 * Copyright 2010 Stefan Lankes, Chair for Operating Systems,
 *                RWTH Aachen University
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * This file is part of MetalSVM.
 */

/*
 * Defines the interface for and structures relating to paging.
 */

#ifndef __ARCH_PAGE_H__
#define __ARCH_PAGE_H__

#include <metalsvm/stddef.h>
//#include
#include <metalsvm/tasks_types.h>

#define _PAGE_BIT_PRESENT	0	/* is present */
#define _PAGE_BIT_RW		1	/* writeable */
#define _PAGE_BIT_USER		2	/* userspace addressable */
#define _PAGE_BIT_PWT		3	/* page write through */
#define _PAGE_BIT_PCD		4	/* page cache disabled */
#define _PAGE_BIT_ACCESSED	5	/* was accessed (raised by CPU) */
#define _PAGE_BIT_DIRTY		6	/* was written to (raised by CPU) */
#define _PAGE_BIT_PSE		7	/* 4 MB (or 2 MB) page */
#define _PAGE_BIT_PAT		7	/* on 4 KB pages */
#define _PAGE_BIT_GLOBAL	8	/* Global TLB entry PPro+ */
#define _PAGE_BIT_RESERVED	9	/* mark a virtual address range as reserved */
#define _PAGE_BIT_PAT_LARGE	12	/* on 2 MB or 1 GB pages */

#define PG_PRESENT	(1 << _PAGE_BIT_PRESENT)
#define PG_RW		(1 << _PAGE_BIT_RW)
#define PG_USER		(1 << _PAGE_BIT_USER)
#define PG_PWT		(1 << _PAGE_BIT_PWT)
#define PG_PCD		(1 << _PAGE_BIT_PCD)
#define PG_ACCESSED	(1 << _PAGE_BIT_ACCESSED)
#define PG_DIRTY	(1 << _PAGE_BIT_DIRTY)
#define PG_PSE		(1 << _PAGE_BIT_PSE)
#define PG_GLOBAL	(1 << _PAGE_BIT_GLOBAL)
#define PG_RESERVED	(1 << _PAGE_BIT_RESERVED)
#define PG_PAT		(1 << _PAGE_BIT_PAT)
#define PG_PAT_LARGE	(1 << _PAGE_BIT_PAT_LARGE)

#define KERN_TABLE	(PG_PRESENT|PG_RW|PG_ACCESSED|PG_DIRTY)
#define USER_TABLE	(PG_PRESENT|PG_RW|PG_ACCESSED|PG_DIRTY|PG_USER)
#define KERN_PAGE	(PG_PRESENT|PG_RW|PG_GLOBAL)
#define USER_PAGE	(PG_PRESENT|PG_RW|PG_USER)

/* A page table holds 1024 32-bit entries and must be page aligned */
typedef struct page_table {
	uint32_t entries[1024];
} page_table_t __attribute__ ((aligned (4096)));

/* A page directory holds 1024 32-bit entries and must be page aligned */
typedef struct page_dir {
	uint32_t entries[1024];
} page_dir_t __attribute__ ((aligned (4096)));

/*
 * Converts a virtual address to a physical address
 */
size_t virt_to_phys(size_t);

/*
 * Allocates a range of npages pages in the virtual address space
 */
size_t vm_alloc(uint32_t npages, uint32_t flags);

/*
 * Frees a range in the virtual address space
 */
int vm_free(size_t addr, uint32_t npages);

/*
 * Maps a physical memory region at a specific virtual address.
 * If the virtual address is zero, this function allocates a valid virtual address on demand.
 */
size_t map_region(size_t viraddr, size_t phyaddr, uint32_t pages, uint32_t type);

/*
 * Sets up the environment and enables paging.
 */
int arch_paging_init(void);

/*
 * Returns the page directory of the boot task
 */
page_dir_t* get_boot_pgd(void);

/*
 * Sets up a new page directory for a new user-level task
 */
int create_pgd(task_t* task, int copy);

/*
 * Deletes the page directory and its page tables
 */
int drop_pgd(void);

/*
 * Changes the page permissions in the page tables of the current task
 */
int change_page_permissions(size_t start, size_t end, uint32_t flags);

#endif
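
/*
 * Illustrative usage sketch (not part of the original header): how a caller
 * might map one uncached page of memory-mapped I/O into the kernel address
 * space via map_region(). The physical address 0xFEE00000 and the flag
 * combination passed as `type' are assumptions for illustration only; the
 * exact semantics of the `type' argument are defined by the paging
 * implementation, not by this header.
 *
 *	size_t viraddr = map_region(0, 0xFEE00000, 1, KERN_PAGE|PG_PCD);
 *	if (!viraddr) {
 *		// mapping failed; handle the error
 *	}
 *	// ... use the mapping, then release the virtual range again
 *	vm_free(viraddr, 1);
 */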