/* metalsvm/arch/x86/include/asm/page_helpers.h */

/*
* Copyright 2014 Steffen Vogel, Chair for Operating Systems,
* RWTH Aachen University
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* This file is part of MetalSVM.
*/
/**
* @file arch/x86/include/asm/page_helpers.h
* @brief Some small helper functions declared as static inline
* @author Stefan Lankes
* @author Steffen Vogel <steffen.vogel@rwth-aachen.de>
*/
#ifndef __ARCH_PAGE_HELPERS_H__
#define __ARCH_PAGE_HELPERS_H__
#include <metalsvm/page.h>
/** @brief Sign extend an integer
 *
 * Replicates the sign bit (bit @p bits - 1) of @p addr into all
 * higher-order bits, e.g. to form a canonical x86_64 virtual address.
 *
 * @param addr The integer to extend
 * @param bits The number of significant low bits of addr
 * @return The sign-extended integer
 */
static inline size_t sign_extend(ssize_t addr, int bits)
{
	// Shifting by a negative amount is undefined behavior in C, so the
	// shift distance must be derived from the type width (width - bits).
	int shift = (int) (sizeof(addr) * 8) - bits;
	return (addr << shift) >> shift; // sign bit is maintained during arithmetic right shift
}
/** @brief Get the page map entry one paging level below @p entry
 *
 * Relies on the tables being mapped at the end of the virtual
 * address space (see virt_to_entry).
 */
static inline page_entry_t* get_child_entry(page_entry_t *entry)
{
	// Descending one level shifts the entry's address left by one
	// index width (PAGE_MAP_BITS).
	size_t addr = ((size_t) entry) << PAGE_MAP_BITS;

#ifdef CONFIG_X86_32
	return (page_entry_t*) addr;
#elif defined(CONFIG_X86_64)
	// canonicalize the virtual address by sign-extending the top bit
	return (page_entry_t*) sign_extend(addr, VIRT_BITS);
#endif
}
/** @brief Get the page map entry one paging level above @p entry
 *
 * Relies on the tables being mapped at the end of the virtual
 * address space (see virt_to_entry).
 */
static inline page_entry_t* get_parent_entry(page_entry_t *entry)
{
	// Ascending one level drops one index width and re-inserts the
	// table-map index into the uppermost slot.
	size_t addr = ((size_t) entry) >> PAGE_MAP_BITS;
	addr |= (PAGE_MAP_ENTRIES-1) << (PAGE_MAP_LEVELS * PAGE_MAP_BITS + 3); // TODO

#ifdef CONFIG_X86_32
	return (page_entry_t*) (addr & ~0x3); // align down to the 4 byte entry size
#elif defined(CONFIG_X86_64)
	// align down to the 8 byte entry size and canonicalize the address
	return (page_entry_t*) sign_extend(addr & ~0x7, VIRT_BITS);
#endif
}
/** @brief Get the corresponding page map entry for a given virtual address
 *
 * Please note: this implementation requires that the tables are mapped
 * at the end of VAS!
 *
 * @param addr The virtual address
 * @param level The paging level of the wanted entry (0 = page table)
 * @return Pointer to the page map entry
 */
static inline page_entry_t* virt_to_entry(size_t addr, int level)
{
	size_t entry = addr;

	// Each round replaces one page-map index with the table-map slot;
	// after level+1 rounds 'entry' points into the table of the
	// requested level.
	do {
		entry = (entry >> PAGE_MAP_BITS)
			| ((PAGE_MAP_ENTRIES-1) << (PAGE_MAP_LEVELS * PAGE_MAP_BITS + 3)); // TODO
	} while (level--);

#ifdef CONFIG_X86_32
	return (page_entry_t*) (entry & ~0x3); // align down to the 4 byte entry size
#elif defined(CONFIG_X86_64)
	// align down to the 8 byte entry size and canonicalize the address
	return (page_entry_t*) sign_extend(entry & ~0x7, VIRT_BITS);
#endif
}
/** @brief Get the corresponding virtual address for a page map entry
 *
 * @param entry Pointer to the page map entry
 * @param level The paging level the entry belongs to
 * @return The first virtual address mapped by that entry
 */
static inline size_t entry_to_virt(page_entry_t* entry, int level)
{
	// Invert the self-map lookup: shift left by one index width per
	// level, plus one more for the level-0 page offset.
	size_t vaddr = ((size_t) entry) << ((level+1) * PAGE_MAP_BITS);

#ifdef CONFIG_X86_32
	return vaddr;
#elif defined(CONFIG_X86_64)
	return sign_extend(vaddr, VIRT_BITS); // canonicalize by sign extension
#endif
}
/** @brief Convert a virtual address to a physical one
 *
 * A non-mapped virtual address causes a pagefault!
 *
 * @param addr Virtual address to convert
 * @return The physical address
 */
static inline size_t virt_to_phys(size_t addr)
{
	// NOTE: 'static inline' (instead of plain 'inline') is required in a
	// header: a C99 inline definition emits no external definition, which
	// breaks linking whenever the compiler chooses not to inline the call.
	page_entry_t* entry = virt_to_entry(addr, 0); // get the PGT entry
	size_t off = addr & ~PAGE_MASK;	// offset within the page
	size_t phy = *entry & PAGE_MASK;	// physical page frame address
	return phy | off;
}
/** @brief Translate arch-independent mapping flags (MAP_*) to page table bits (PG_*)
 *
 * @param flags Any combination of MAP_* flags
 * @return The corresponding PG_* bits for a page table entry
 */
static inline size_t page_bits(int flags)
{
	// start from the most permissive kernel mapping and strip bits
	size_t bits = PG_PRESENT | PG_RW | PG_XD | PG_GLOBAL;

	if (flags & MAP_NO_ACCESS)	bits &= ~PG_PRESENT;
	if (flags & MAP_READ_ONLY)	bits &= ~PG_RW;
#ifdef CONFIG_X86_64
	if (flags & MAP_CODE)		bits &= ~PG_XD;
#endif
	if (flags & MAP_USER_SPACE) {
		// user pages are not global and must carry the user bit
		bits &= ~PG_GLOBAL;
		bits |= PG_USER;
	}
	if (flags & MAP_WT)		bits |= PG_PWT;
	if (flags & MAP_NO_CACHE)	bits |= PG_PCD;
	if (flags & MAP_MPE)		bits |= PG_MPE;
	if (flags & MAP_SVM_INIT)	bits |= PG_SVM_INIT;
	if (flags & MAP_SVM_LAZYRELEASE)	bits |= PG_SVM_LAZYRELEASE;
	if (flags & MAP_SVM_STRONG)	bits |= PG_SVM_STRONG;

	return bits;
}
/** @brief Collect the effective protection flags of a mapped page
 *
 * Combines the flag bits of all paging levels: PG_USER and PG_RW are
 * only effective if set on every level, PG_XD dominates if set on any.
 *
 * @param viraddr The virtual address of the page
 * @return The effective PG_* flag bits
 *
 * TODO: test
 */
static inline size_t get_page_flags(size_t viraddr)
{
	// 'static inline' avoids multiple-definition link errors when this
	// header is included by more than one translation unit.
	page_entry_t* entry = virt_to_entry(viraddr, 0);
	size_t flags = *entry & ~PAGE_MASK;
	int i;

	for (i=1; i<PAGE_MAP_LEVELS; i++) {
		entry = virt_to_entry(viraddr, i);
#ifdef CONFIG_X86_64
		flags |= (*entry & PG_XD); // no-execute wins if set on any level
#endif
		// keep PG_USER/PG_RW only if every level grants them
		// (the original applied the PG_USER mask twice; the duplicate
		// was redundant and has been removed)
		flags &= (*entry & PG_USER) | ~PG_USER;
		flags &= (*entry & PG_RW) | ~PG_RW;
	}

	return flags;
}
#endif