/*
* Copyright 2014 Steffen Vogel, Chair for Operating Systems,
* RWTH Aachen University
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* This file is part of MetalSVM.
*/

/**
* @file arch/x86/include/asm/page_helpers.h
* @brief Some small helper functions declared as static inline
* @author Stefan Lankes
* @author Steffen Vogel <steffen.vogel@rwth-aachen.de>
*/

#ifndef __ARCH_PAGE_HELPERS_H__
#define __ARCH_PAGE_HELPERS_H__

#include <metalsvm/page.h>

/** @brief Sign extend an integer
 *
 * @param addr The integer to extend
 * @param bits The width of addr in bits which should be sign extended
 * @return The sign extended integer
 */
static inline size_t sign_extend(ssize_t addr, int bits)
{
	int shift = BITS - bits;
	return (addr << shift) >> shift; // sign bit gets copied during arithmetic right shift
}
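
/* Illustrative example (assuming BITS == 64 and 48-bit canonical addresses):
 * sign_extend(0x0000800000000000, 48) shifts the value left by 16 bits and
 * arithmetically back right by 16 bits, copying bit 47 into bits 48..63 and
 * yielding 0xFFFF800000000000, the canonical form of that address.
 */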

/** @brief Get the base address of the child table
 *
 * @param entry The parent entry
 * @return The base address of the child table referenced by entry
 */
static inline page_entry_t* get_child_entry(page_entry_t *entry)
{
	size_t child = (size_t) entry;
	child <<= PAGE_MAP_BITS;
	return (page_entry_t*) CANONICAL(child);
}
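
/* Note: get_child_entry() and get_parent_entry() rely on the self-referencing
 * ("recursive") page table mapping set up by the paging code: because the page
 * tables are mapped at the end of the virtual address space, shifting a table
 * address left by PAGE_MAP_BITS descends one level of the page map, while
 * shifting it right by PAGE_MAP_BITS ascends one level (sketch assuming
 * PAGE_MAP_BITS == 9 on x86_64).
 */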

/** @brief Get the parent entry which maps the table containing a given entry
 *
 * @param entry The child entry
 * @return The parent entry
 */
static inline page_entry_t* get_parent_entry(page_entry_t *entry)
{
	ssize_t parent = (size_t) entry;
	parent >>= PAGE_MAP_BITS;
	parent |= PAGE_MAP_PGT;
	parent &= ~(sizeof(size_t) - 1); // align to page_entry_t
	return (page_entry_t*) CANONICAL(parent);
}
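
/* Sketch of the address arithmetic (under the recursive mapping described
 * above): the right shift moves the entry address one level up, ORing in
 * PAGE_MAP_PGT keeps the result inside the self-mapped table region, and the
 * mask aligns the address to the sizeof(size_t)-byte entry boundary.
 */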

/** @brief Get the corresponding page map entry to a given virtual address
 *
 * Please note: this implementation requires that the tables are mapped
 * at the end of VAS!
 *
 * @param addr The virtual address
 * @param level The page map level (0 corresponds to the page table)
 * @return Pointer to the page map entry
 */
static inline page_entry_t* virt_to_entry(ssize_t addr, int level)
{
	addr >>= PAGE_MAP_BITS;
	addr |= PAGE_MAP_PGT;
	addr >>= level * PAGE_MAP_BITS;
	addr &= ~(sizeof(size_t) - 1); // align to page_entry_t
	return (page_entry_t*) CANONICAL(addr);
}
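
/* Hypothetical usage, looking up the entries of an address at the lowest and
 * the topmost level (viraddr is an assumed variable):
 *
 *   page_entry_t* pgt_entry = virt_to_entry(viraddr, 0);
 *   page_entry_t* top_entry = virt_to_entry(viraddr, PAGE_MAP_LEVELS - 1);
 *
 * Level 0 yields the page table (PGT) entry, as used by virt_to_phys() below.
 */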

/** @brief Get the corresponding virtual address to a page map entry */
static inline size_t entry_to_virt(page_entry_t* entry, int level)
{
	size_t addr = (size_t) entry;
	addr <<= (level+1) * PAGE_MAP_BITS;
	return CANONICAL(addr);
}
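
/* entry_to_virt() is the inverse of virt_to_entry(): shifting the entry's
 * address left by (level+1) * PAGE_MAP_BITS moves the page map indices encoded
 * in that address back into their virtual address positions; the self-map
 * prefix ends up above the canonical range and is fixed up by CANONICAL()
 * (a sketch of the intended behaviour under the recursive mapping above).
 */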

/** @brief Convert a virtual address to a physical one
 *
 * A non-mapped virtual address causes a page fault!
 *
 * @param addr Virtual address to convert
 * @return The physical address
 */
inline size_t virt_to_phys(size_t addr)
{
	page_entry_t* entry = virt_to_entry(addr, 0); // get the PGT entry
	size_t off = addr & ~PAGE_MASK; // offset within the page
	size_t phy = *entry & PAGE_MASK; // physical address of the page frame
	return phy | off;
}
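
/* Hypothetical usage, e.g. when a device needs the physical address of a
 * kernel buffer (buffer is an assumed identifier):
 *
 *   size_t paddr = virt_to_phys((size_t) buffer);
 *
 * The buffer has to be mapped, otherwise the access to *entry above faults.
 */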

/** @brief Update page table bits (PG_*) by using arch independent flags (MAP_*) */
static size_t page_bits(int flags)
{
	size_t bits = PG_PRESENT | PG_RW | PG_XD | PG_GLOBAL;

	if (flags & MAP_NO_ACCESS) bits &= ~PG_PRESENT;
	if (flags & MAP_READ_ONLY) bits &= ~PG_RW;
#ifdef CONFIG_X86_64
	if (flags & MAP_CODE) bits &= ~PG_XD;
#endif
	if (flags & MAP_USER_SPACE) {
		bits &= ~PG_GLOBAL;
		bits |= PG_USER;
	}
	if (flags & MAP_WT) bits |= PG_PWT;
	if (flags & MAP_NO_CACHE) bits |= PG_PCD;
	if (flags & MAP_MPE) bits |= PG_MPE;
	if (flags & MAP_SVM_INIT) bits |= PG_SVM_INIT;
	if (flags & MAP_SVM_LAZYRELEASE) bits |= PG_SVM_LAZYRELEASE;
	if (flags & MAP_SVM_STRONG) bits |= PG_SVM_STRONG;

	return bits;
}
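
/* Worked example (x86_64): page_bits(MAP_READ_ONLY | MAP_USER_SPACE) starts
 * from PG_PRESENT | PG_RW | PG_XD | PG_GLOBAL, clears PG_RW and PG_GLOBAL,
 * sets PG_USER and keeps PG_XD (no MAP_CODE), i.e. a present, read-only,
 * non-executable user mapping.
 */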

// TODO: test
/** @brief Get the page flags of a given virtual address
 *
 * PG_USER and PG_RW are only reported as set if they are set on every level
 * of the page map; PG_XD is reported if it is set on any level.
 *
 * @param viraddr The virtual address
 * @return The flag bits of the corresponding page map entries
 */
size_t get_page_flags(size_t viraddr)
{
	page_entry_t* entry = virt_to_entry(viraddr, 0);
	size_t flags = *entry & ~PAGE_MASK;
	int i;

	// accumulate the flags of the higher levels
	for (i=1; i<PAGE_MAP_LEVELS; i++) {
		entry = virt_to_entry(viraddr, i);
#ifdef CONFIG_X86_64
		flags |= (*entry & PG_XD); // execution is disabled if any level disables it
#endif
		flags &= (*entry & PG_USER) | ~PG_USER; // user-accessible only if every level allows it
		flags &= (*entry & PG_RW) | ~PG_RW; // writable only if every level allows it
	}

	return flags;
}
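
/* Hypothetical usage, checking whether an address is writable from user space
 * (viraddr is an assumed variable):
 *
 *   size_t flags = get_page_flags(viraddr);
 *   int user_writable = (flags & PG_USER) && (flags & PG_RW);
 *
 * Both bits are only reported if they are set on every level of the page map.
 */
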
#endif