remove obsolete code and switch back to Steffen's revised page handling
This commit is contained in:
parent f1dd432a53
commit 7f8e4d2934
9 changed files with 51 additions and 359 deletions
@@ -54,7 +54,7 @@
/// Physical address width (we dont support PAE)
#define PHYS_BITS BITS
/// Page map bits
-#define PAGE_MAP_BITS 10
+#define PAGE_MAP_BITS 10
/// Number of page map indirections
#define PAGE_LEVELS 2
@@ -69,9 +69,6 @@
/// Align to page
#define PAGE_CEIL(addr) ( (addr) & PAGE_MASK)

/// Canonical address format
#define CANONICAL(addr) (addr)

/// Page is present
#define PG_PRESENT (1 << 0)
/// Page is read- and writable
@@ -104,36 +101,6 @@
 */
size_t virt_to_phys(size_t vir);

-/** @brief Unmap the physical memory at a specific virtual address
- *
- * All Page table entries within this range will be marked as not present
- * and (in the case of userspace memory) the page usage of the task will be decremented.
- *
- * @param viraddr The range's virtual address
- * @param npages The range's size in pages
- *
- * @return
- * - 0 on success
- * - -EINVAL (-22) on failure.
- */
-int unmap_region(size_t viraddr, uint32_t npages);
-
-/** @brief Mapping a physical mem-region to a virtual address
- *
- * Maps a physical memory region to a specific virtual address.
- * If the virtual address is zero, this functions allocates a valid virtual address on demand.
- *
- * @param viraddr Desired virtual address
- * @param phyaddr Physical address to map from
- * @param npages The region's size in number of pages
- * @param flags Further page flags
- *
- * @return
- * - A virtual address on success
- * - 0 on failure.
- */
-size_t map_region(size_t viraddr, size_t phyaddr, uint32_t npages, uint32_t flags);
-
/** @brief Initialize paging subsystem
 *
 * This function uses the existing bootstrap page tables (boot_{pgd, pgt})
@@ -143,20 +110,20 @@ size_t virt_to_phys(size_t vir);
 */
int page_init(void);

-/** @brief Map a continious region of pages
+/** @brief Map a continuous region of pages
 *
- * @param viraddr
- * @param phyaddr
- * @param npages
- * @param bits
+ * @param viraddr Desired virtual address
+ * @param phyaddr Physical address to map from
+ * @param npages The region's size in number of pages
+ * @param bits Further page flags
 * @return
 */
int page_map(size_t viraddr, size_t phyaddr, size_t npages, size_t bits);

-/** @brief Unmap a continious region of pages
+/** @brief Unmap a continuous region of pages
 *
- * @param viraddr
- * @param npages
+ * @param viraddr The virtual start address
+ * @param npages The range's size in pages
 * @return
 */
int page_unmap(size_t viraddr, size_t npages);
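These declarations are the whole of the surviving mapping interface. A minimal usage sketch follows; the addresses are hypothetical and the nonzero-on-failure convention is inferred from the callers later in this diff (palloc() and copy_page() treat any nonzero return as an error):

    /* Hypothetical caller: back two pages of virtual memory with fresh
     * frames, use them, then tear the mapping down again. */
    size_t phyaddr = get_pages(2);          // two physical page frames
    size_t viraddr = 0xC1000000;            // assumed free kernel address

    if (page_map(viraddr, phyaddr, 2, PG_PRESENT | PG_RW | PG_GLOBAL)) {
        kputs("page_map failed\n");         // nonzero indicates failure
    } else {
        /* ... use the mapping ... */
        page_unmap(viraddr, 2);             // mark the PTEs not present again
    }
    put_pages(phyaddr, 2);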
@@ -67,88 +67,6 @@ static size_t * other[PAGE_LEVELS] = {
    (size_t *) 0xFFFFE000
};

-/// Mapping of self referenced page map (at the end of the VAS)
-// TODO: find a more generic initialization
-static size_t* const current_map = (size_t*) (1 * 0xFFFFF000);
-
-/** @brief Get the base address of the child table
- *
- * @param entry The parent entry
- * @return The child entry
- */
-static inline size_t* get_child_entry(size_t *entry)
-{
-    size_t child = (size_t) entry;
-
-    child <<= PAGE_MAP_BITS;
-
-    return (size_t*) CANONICAL(child);
-}
-
-/** @brief Get the base address of the parent entry
- *
- * @param entry The child entry
- * @return The parent entry
- */
-static inline size_t* get_parent_entry(size_t *entry)
-{
-    ssize_t parent = (size_t) entry;
-
-    parent >>= PAGE_MAP_BITS;
-    parent |= (size_t) self[0];
-    parent &= ~(sizeof(size_t) - 1); // align to page_entry_t
-
-    return (size_t*) CANONICAL(parent);
-}
-
-/** @brief Get the corresponding page map entry to a given virtual address
- *
- * Please note: this implementation requires that the tables are mapped
- * at the end of VAS!
- */
-static inline size_t* virt_to_entry(ssize_t addr, int level)
-{
-    addr >>= PAGE_MAP_BITS;
-    addr |= (size_t) self[0]; //TODO: PAGE_MAP_PGT;
-
-    addr >>= level * PAGE_MAP_BITS;
-    addr &= ~(sizeof(size_t) - 1); // align to page_entry_t
-
-    return (size_t*) CANONICAL(addr);
-}
-
-/** @brief Get the corresponding virtual address to a page map entry */
-static inline size_t entry_to_virt(size_t* entry, int level)
-{
-    size_t addr = (size_t) entry;
-
-    addr <<= (level+1) * PAGE_MAP_BITS;
-
-    return CANONICAL(addr);
-}
-
-/** @brief Update page table bits (PG_*) by using arch independent flags (MAP_*) */
-static inline size_t page_bits(int flags)
-{
-#ifdef CONFIG_X86_32
-    size_t bits = PG_PRESENT | PG_RW | PG_GLOBAL;
-#elif defined(CONFIG_X86_64)
-    size_t bits = PG_PRESENT | PG_RW | PG_XD | PG_GLOBAL;
-#endif
-
-    if (flags & MAP_NO_ACCESS) bits &= ~PG_PRESENT;
-    if (flags & MAP_READ_ONLY) bits &= ~PG_RW;
-#ifdef CONFIG_X86_64
-    if (flags & MAP_CODE) bits &= ~PG_XD;
-#endif
-    if (flags & MAP_USER_SPACE) bits &= ~PG_GLOBAL;
-    if (flags & MAP_USER_SPACE) bits |= PG_USER;
-    if (flags & MAP_WT) bits |= PG_PWT;
-    if (flags & MAP_NO_CACHE) bits |= PG_PCD;
-
-    return bits;
-}
-
size_t virt_to_phys(size_t addr)
{
    size_t vpn = addr >> PAGE_BITS; // virtual page number
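The deleted helpers all lean on the self-referencing trick noted in the comment above current_map: the last page-directory slot points back at the directory itself, so every page table appears in a fixed window at the top of the address space, and an entry's address falls out of shift-and-or arithmetic alone (the kernel version even gets the second OR for free, because addr is a ssize_t and the arithmetic right shift drags the high self-map bits along). Below is a standalone sketch of the same arithmetic for 32-bit two-level paging; the 0xFFC00000 window and the sample address are assumptions for illustration:

    #include <stdint.h>
    #include <stdio.h>

    /* Self-map in the last PGD slot: all page tables appear at
     * 0xFFC00000..0xFFFFEFFF, the PGD itself at 0xFFFFF000. */
    #define SELF_BASE 0xFFC00000u

    /* Address of the entry mapping 'vaddr': level 0 yields the PTE,
     * level 1 the PGD entry (mirrors virt_to_entry above). */
    static uint32_t vaddr_to_entry(uint32_t vaddr, int level)
    {
        uint32_t e = (SELF_BASE | (vaddr >> 10)) & ~3u;   // one indirection
        if (level)
            e = (SELF_BASE | (e >> 10)) & ~3u;            // second indirection
        return e;
    }

    int main(void)
    {
        printf("PTE of 0x40000000 lives at %#x\n", vaddr_to_entry(0x40000000u, 0)); // 0xffd00000
        printf("its PGD entry lives at %#x\n", vaddr_to_entry(0x40000000u, 1));     // 0xfffff400
        return 0;
    }

Run on the host, the two printed addresses are what virt_to_entry(0x40000000, 0) and virt_to_entry(0x40000000, 1) would return inside the kernel under this layout.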
@@ -330,202 +248,6 @@ void page_fault_handler(struct state *s)
    while(1) HALT;
}

-size_t map_region(size_t viraddr, size_t phyaddr, uint32_t npages, uint32_t flags)
-{
-    task_t* task = current_task;
-    size_t* first[PAGE_LEVELS];
-    size_t* last[PAGE_LEVELS];
-
-    // TODO: this behaviour should be deprecated
-    if (!viraddr) {
-        int vma_flags = VMA_HEAP;
-        if (flags & MAP_USER_SPACE)
-            vma_flags |= VMA_USER;
-
-        viraddr = vma_alloc(npages * PAGE_SIZE, vma_flags);
-    }
-
-    size_t bits = page_bits(flags);
-    size_t start = viraddr;
-    size_t end = start + npages * PAGE_SIZE;
-
-    int traverse(int level, size_t* entry) {
-        size_t* stop = entry + PAGE_MAP_ENTRIES;
-        for (; entry != stop; entry++) {
-            if (entry < last[level] && entry >= first[level]) {
-                if (level) { // PGD, PDPT, PML4..
-                    if (*entry & PG_PRESENT) {
-                        if ((flags & MAP_USER_SPACE) && !(*entry & PG_USER)) {
-                            /* We are altering entries which cover
-                             * the kernel. So before changing them we need to
-                             * make a private copy for the task */
-                            size_t phyaddr = get_page();
-                            if (BUILTIN_EXPECT(!phyaddr, 0))
-                                return -ENOMEM;
-
-                            atomic_int32_inc(&task->user_usage);
-
-                            copy_page(phyaddr, *entry & PAGE_MASK);
-                            *entry = phyaddr | (*entry & ~PAGE_MASK);
-                            *entry &= ~PG_GLOBAL;
-                            *entry |= PG_USER;
-
-                            /* We just need to flush the table itself.
-                             * TLB entries for the kernel remain valid
-                             * because we've not changed them. */
-                            tlb_flush_one_page(entry_to_virt(entry, 0));
-                        }
-                    }
-                    else {
-                        /* Theres no page map table available
-                         * which covers the region. Therefore we will create a
-                         * new table. */
-                        size_t phyaddr = get_page();
-                        if (BUILTIN_EXPECT(!phyaddr, 0))
-                            return -ENOMEM;
-
-                        if (flags & MAP_USER_SPACE)
-                            atomic_int32_inc(&task->user_usage);
-
-                        *entry = phyaddr | bits;
-
-                        memset(get_child_entry(entry), 0x00, PAGE_SIZE); // fill with zeros
-                    }
-
-                    // do "pre-order" traversal if no hugepage
-                    if (!(*entry & PG_PSE)) {
-                        int ret = traverse(level-1, get_child_entry(entry));
-                        if (ret < 0)
-                            return ret;
-                    }
-                }
-                else { // PGT
-                    if ((*entry & PG_PRESENT) && !(flags & MAP_REMAP))
-                        return -EINVAL;
-
-                    *entry = phyaddr | bits;
-
-                    if (flags & MAP_USER_SPACE)
-                        atomic_int32_inc(&task->user_usage);
-
-                    if (flags & MAP_REMAP)
-                        tlb_flush_one_page(entry_to_virt(entry, level));
-
-                    phyaddr += PAGE_SIZE;
-                }
-            }
-        }
-
-        return 0;
-    }
-
-    kprintf("map_region: map %u pages from %#lx to %#lx with flags: %#x\n", npages, viraddr, phyaddr, flags); // TODO: remove
-
-    if (BUILTIN_EXPECT(!task || !task->page_map, 0))
-        return 0;
-
-    // calc page tree boundaries
-    int i;
-    for (i=0; i<PAGE_LEVELS; i++) {
-        first[i] = virt_to_entry(start, i);
-        last[i] = virt_to_entry(end - 1, i) + 1; // exclusive
-    }
-
-    // lock tables
-    if (start < KERNEL_SPACE)
-        spinlock_lock(&kslock);
-    if (end >= KERNEL_SPACE)
-        spinlock_irqsave_lock(&task->page_lock);
-
-    int ret = traverse(PAGE_LEVELS-1, current_map);
-
-    // unlock tables
-    if (start < KERNEL_SPACE)
-        spinlock_unlock(&kslock);
-    if (end >= KERNEL_SPACE)
-        spinlock_irqsave_unlock(&task->page_lock);
-
-    return (ret) ? 0 : viraddr;
-}
-
-int unmap_region(size_t viraddr, uint32_t npages)
-{
-    task_t* task = current_task;
-    size_t* first[PAGE_LEVELS];
-    size_t* last[PAGE_LEVELS];
-
-    size_t start = viraddr;
-    size_t end = start + npages * PAGE_SIZE;
-
-    kprintf("unmap_region: unmap %u pages from %#lx\n", npages, viraddr); // TODO: remove
-
-    /** @return number of page table entries which a present */
-    int traverse(int level, size_t* entry) {
-        int used = 0;
-        size_t* stop = entry + PAGE_MAP_ENTRIES;
-        for (; entry != stop; entry++) {
-            if (entry < last[level] && entry >= first[level]) {
-                if (level) { // PGD, PDPT, PML4
-                    if ((*entry & PG_PRESENT) && !(*entry & PG_PSE)) {
-                        // do "post-order" traversal if table is present and no hugepage
-                        if (traverse(level-1, get_child_entry(entry)))
-                            used++;
-                        else { // child table is empty => delete it
-                            *entry &= ~PG_PRESENT;
-                            tlb_flush_one_page(entry_to_virt(entry, 0));
-
-                            if (*entry & PG_USER) {
-                                if (put_page(*entry & PAGE_MASK))
-                                    atomic_int32_dec(&task->user_usage);
-                            }
-                        }
-                    }
-                }
-                else { // PGT
-                    *entry &= ~PG_PRESENT;
-
-                    tlb_flush_one_page(entry_to_virt(entry, level));
-
-                    if (*entry & PG_USER)
-                        atomic_int32_dec(&task->user_usage);
-                }
-            }
-            else {
-                if (*entry & PG_PRESENT)
-                    used++;
-            }
-        }
-
-        return used;
-    }
-
-    if (BUILTIN_EXPECT(!task || !task->page_map, 0))
-        return 0;
-
-    // calc page tree boundaries
-    int i;
-    for (i=0; i<PAGE_LEVELS; i++) {
-        first[i] = virt_to_entry(start, i);
-        last[i] = virt_to_entry(end - 1, i) + 1; // exclusive
-    }
-
-    // lock tables
-    if (start < KERNEL_SPACE)
-        spinlock_lock(&kslock);
-    if (end >= KERNEL_SPACE)
-        spinlock_irqsave_lock(&task->page_lock);
-
-    traverse(PAGE_LEVELS-1, current_map);
-
-    // unlock tables
-    if (start < KERNEL_SPACE)
-        spinlock_unlock(&kslock);
-    if (end > KERNEL_SPACE)
-        spinlock_irqsave_unlock(&task->page_lock);
-
-    return 0;
-}
-
int page_init(void)
{
    size_t addr, npages;
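The removed map_region() translated its architecture-independent MAP_* flags through page_bits(), deleted in the previous hunk. As a host-side illustration of that translation, here is a re-creation of the x86_32 branch; only PG_PRESENT's value appears in this diff, so the remaining bit positions follow the standard x86 PTE layout and are assumptions here:

    #include <stdint.h>
    #include <stdio.h>

    /* PG_* bit positions per the x86 PTE layout (assumed, see lead-in). */
    #define PG_PRESENT (1 << 0)
    #define PG_RW      (1 << 1)
    #define PG_USER    (1 << 2)
    #define PG_PWT     (1 << 3)
    #define PG_PCD     (1 << 4)
    #define PG_GLOBAL  (1 << 8)

    /* MAP_* values as in the memory-header hunk below. */
    #define MAP_NO_ACCESS  (1 << 0)
    #define MAP_READ_ONLY  (1 << 1)
    #define MAP_USER_SPACE (1 << 2)
    #define MAP_WT         (1 << 4)
    #define MAP_NO_CACHE   (1 << 5)

    static uint32_t page_bits(int flags)
    {
        uint32_t bits = PG_PRESENT | PG_RW | PG_GLOBAL;    // kernel defaults

        if (flags & MAP_NO_ACCESS)  bits &= ~PG_PRESENT;
        if (flags & MAP_READ_ONLY)  bits &= ~PG_RW;
        if (flags & MAP_USER_SPACE) bits = (bits & ~PG_GLOBAL) | PG_USER;
        if (flags & MAP_WT)         bits |= PG_PWT;
        if (flags & MAP_NO_CACHE)   bits |= PG_PCD;

        return bits;
    }

    int main(void)
    {
        printf("%#x\n", page_bits(MAP_USER_SPACE | MAP_READ_ONLY)); // prints 0x5
        return 0;
    }

page_bits(MAP_USER_SPACE | MAP_READ_ONLY) drops PG_RW and PG_GLOBAL and adds PG_USER, leaving 0x5: present, user-accessible, read-only, non-global.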
@@ -37,7 +37,7 @@
#define __MEMORY_H__

/** @brief Initialize the memory subsystem */
-int memory_init();
+int memory_init(void);

/** @brief Request physical page frames */
size_t get_pages(size_t npages);
@@ -44,17 +44,6 @@
extern "C" {
#endif

-#define MAP_NO_ACCESS (1 << 0)
-#define MAP_READ_ONLY (1 << 1)
-#define MAP_USER_SPACE (1 << 2)
-#define MAP_CODE (1 << 3)
-#define MAP_WT (1 << 4)
-#define MAP_NO_CACHE (1 << 5)
-
-#define MAP_KERNEL_SPACE (0 << 2) // legacy compatibility
-#define MAP_REMAP (1 << 12)
-//#define MAP_NON_CONTINUOUS (1 << 13) // TODO
-
/** @brief General page allocator function
 *
 * This function allocates and maps whole pages.
@@ -34,6 +34,7 @@
#include <eduos/tasks.h>
#include <eduos/syscall.h>
#include <eduos/memory.h>
+#include <eduos/vma.h>

#include <asm/irq.h>
#include <asm/irqflags.h>
@@ -75,7 +76,7 @@ static int wrapper(void* arg)
    *stack-- = (size_t) arg;
    *stack = (size_t) NULL; // put exit function as caller on the stack

-#if 1
+#if 0
    // this triggers a page fault because a user task is not able to access the kernel space
    return jump_to_user_code((uint32_t) userfoo, (uint32_t) stack);
#else
@@ -84,13 +85,17 @@ static int wrapper(void* arg)
    size_t vuserfoo = 0x40000000;
    page_map(vuserfoo, phys, 2, PG_PRESENT | PG_USER);
    vuserfoo += (size_t)userfoo & 0xFFF;
+   vma_add(vuserfoo, vuserfoo + 2*PAGE_SIZE, VMA_USER|VMA_CACHEABLE|VMA_READ|VMA_EXECUTE);

    // dirty hack, map ustack to the user space
    phys = virt_to_phys((size_t) ustack);
    size_t vstack = 0x80000000;
    page_map(vstack, phys, KERNEL_STACK_SIZE >> PAGE_BITS, PG_PRESENT | PG_RW | PG_USER);
+   vma_add(vstack, vstack+KERNEL_STACK_SIZE, VMA_USER|VMA_CACHEABLE|VMA_READ|VMA_WRITE);
    vstack = (vstack + KERNEL_STACK_SIZE - 16 - sizeof(size_t));
+
+   vma_dump();

    return jump_to_user_code(vuserfoo, vstack);
#endif
}
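Worth spelling out: page_map() is page-granular, so the user-space alias vuserfoo only points at the function if the offset of userfoo within its page is carried over, which is what the `& 0xFFF` (the low 12 bits on 4 KiB pages) does. A tiny host-side illustration with made-up addresses:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uintptr_t userfoo  = 0x0010a234;    // hypothetical kernel-space address of the function
        uintptr_t vuserfoo = 0x40000000;    // page-aligned user-space alias

        vuserfoo += userfoo & 0xFFF;        // carry over the offset within the page
        printf("jump target: %#lx\n", (unsigned long) vuserfoo); // 0x40000234
        return 0;
    }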
@@ -144,7 +149,7 @@ int main(void)

    create_kernel_task(&id1, foo, "foo1", NORMAL_PRIO);
-   //create_kernel_task(&id2, wrapper, "userfoo", NORMAL_PRIO);
+   create_kernel_task(&id2, wrapper, "userfoo", NORMAL_PRIO);

    while(1) {
        HALT;
@@ -200,6 +200,8 @@ static int create_task(tid_t* id, entry_point_t ep, void* arg, uint8_t prio)
    task_table[i].last_stack_pointer = NULL;
    task_table[i].stack = create_stack(i);
    task_table[i].prio = prio;
+   spinlock_init(&task_table[i].vma_lock);
+   task_table[i].vma_list = NULL;

    spinlock_irqsave_init(&task_table[i].page_lock);
    atomic_int32_set(&task_table[i].user_usage, 0);
17 mm/malloc.c
@@ -110,7 +110,7 @@ static void buddy_put(buddy_t* buddy)
    spinlock_unlock(&buddy_lock);
}

-void buddy_dump()
+void buddy_dump(void)
{
    size_t free = 0;
    int i;
@@ -133,8 +133,9 @@ void* palloc(size_t sz, uint32_t flags)
{
    size_t phyaddr, viraddr;
    uint32_t npages = PAGE_FLOOR(sz) >> PAGE_BITS;
+   int err;

-   kprintf("palloc(%lu) (%lu pages)\n", sz, npages); // TODO: remove
+   //kprintf("palloc(%lu) (%lu pages)\n", sz, npages);

    // get free virtual address space
    viraddr = vma_alloc(npages*PAGE_SIZE, VMA_HEAP);
@@ -149,8 +150,8 @@ void* palloc(size_t sz, uint32_t flags)
    }

    // map physical pages to VMA
-   viraddr = map_region(viraddr, phyaddr, npages, flags);
-   if (BUILTIN_EXPECT(!viraddr, 0)) {
+   err = page_map(viraddr, phyaddr, npages, PG_RW|PG_GLOBAL);
+   if (BUILTIN_EXPECT(err, 0)) {
        vma_free(viraddr, viraddr+npages*PAGE_SIZE);
        put_pages(phyaddr, npages);
        return NULL;
@@ -169,13 +170,13 @@ void pfree(void* addr, size_t sz)
    size_t viraddr = (size_t) addr & PAGE_MASK;
    uint32_t npages = PAGE_FLOOR(sz) >> PAGE_BITS;

-   // memory is propably not continously mapped! (userspace heap)
+   // memory is probably not continuously mapped! (userspace heap)
    for (i=0; i<npages; i++) {
        phyaddr = virt_to_phys(viraddr+i*PAGE_SIZE);
        put_page(phyaddr);
    }

-   unmap_region(viraddr, npages);
+   page_unmap(viraddr, npages);
    vma_free(viraddr, viraddr+npages*PAGE_SIZE);
}
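With these two hunks the allocator's page path is entirely on the new interface: palloc() reserves a VMA and backs it via page_map(), pfree() reverses both steps. A hypothetical round trip (sizes are rounded up to whole pages, per the PAGE_FLOOR() expression above):

    /* Hypothetical kernel-side caller: */
    void* buf = palloc(3*PAGE_SIZE + 1, 0);    // rounds up: reserves and maps 4 pages
    if (buf) {
        /* ... use buf ... */
        pfree(buf, 3*PAGE_SIZE + 1);           // frees the frames, unmaps, releases the VMA
    }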
@@ -199,7 +200,7 @@ void* kmalloc(size_t sz)
    buddy->prefix.magic = BUDDY_MAGIC;
    buddy->prefix.exponent = exp;

-   kprintf("kmalloc(%lu) = %p\n", sz, buddy+1); // TODO: remove
+   //kprintf("kmalloc(%lu) = %p\n", sz, buddy+1);

    // pointer arithmetic: we hide the prefix
    return buddy+1;
@@ -210,7 +211,7 @@ void kfree(void *addr)
    if (BUILTIN_EXPECT(!addr, 0))
        return;

-   kprintf("kfree(%lu)\n", addr); // TODO: remove
+   //kprintf("kfree(%lu)\n", addr);

    buddy_t* buddy = (buddy_t*) addr - 1; // get prefix
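The `return buddy+1` above and the `(buddy_t*) addr - 1` here are the two halves of the hidden-header idiom: allocator metadata sits immediately in front of the pointer the caller sees. A standalone sketch of the idiom; prefix_t and the magic value are illustrative stand-ins, not eduOS's buddy_t:

    #include <stdio.h>
    #include <stdlib.h>

    /* Metadata stored directly in front of the user pointer. */
    typedef struct { unsigned magic; unsigned exponent; } prefix_t;

    static void* alloc_with_prefix(size_t sz, unsigned exp)
    {
        prefix_t* p = malloc(sizeof(prefix_t) + sz);
        if (!p) return NULL;
        p->magic = 0xBABE;                    // hypothetical magic value
        p->exponent = exp;
        return p + 1;                         // hide the header from the caller
    }

    static void free_with_prefix(void* addr)
    {
        prefix_t* p = (prefix_t*) addr - 1;   // recover the header
        if (p->magic == 0xBABE)               // sanity check before freeing
            free(p);
    }

    int main(void)
    {
        void* buf = alloc_with_prefix(64, 6);
        printf("caller sees %p\n", buf);
        free_with_prefix(buf);
        return 0;
    }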
23 mm/memory.c
@@ -159,6 +159,8 @@ int put_pages(size_t phyaddr, size_t npages)

int copy_page(size_t pdest, size_t psrc)
{
+   int err;
+
    static size_t viraddr;
    if (!viraddr) { // statically allocate virtual memory area
        viraddr = vma_alloc(2 * PAGE_SIZE, VMA_HEAP);
@@ -167,10 +169,17 @@ int copy_page(size_t pdest, size_t psrc)
    }

    // map pages
-   size_t vsrc = map_region(viraddr, psrc, 1, MAP_KERNEL_SPACE);
-   size_t vdest = map_region(viraddr + PAGE_SIZE, pdest, 1, MAP_KERNEL_SPACE);
-   if (BUILTIN_EXPECT(!vsrc || !vdest, 0)) {
-       unmap_region(viraddr, 2);
+   size_t vsrc = viraddr;
+   err = page_map(vsrc, psrc, 1, PG_GLOBAL|PG_RW);
+   if (BUILTIN_EXPECT(err, 0)) {
+       page_unmap(viraddr, 1);
        return -ENOMEM;
    }

+   size_t vdest = viraddr + PAGE_SIZE;
+   err = page_map(vdest, pdest, 1, PG_GLOBAL|PG_RW);
+   if (BUILTIN_EXPECT(err, 0)) {
+       page_unmap(viraddr + PAGE_SIZE, 1);
+       return -ENOMEM;
+   }
@@ -180,7 +189,7 @@ int copy_page(size_t pdest, size_t psrc)
    memcpy((void*) vdest, (void*) vsrc, PAGE_SIZE);

    // householding
-   unmap_region(viraddr, 2);
+   page_unmap(viraddr, 2);

    return 0;
}
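copy_page() is what gave a task a private copy of a shared page (the removed map_region() used it for exactly that), and it carries over cleanly to the new interface: both frames are mapped briefly into a reserved two-page window, copied, and unmapped. A hypothetical call site, assuming get_page() hands back usable frames:

    /* Duplicate one physical frame, e.g. for a private table copy: */
    size_t src  = get_page();       // frame holding the data to clone
    size_t dest = get_page();       // fresh destination frame

    if (copy_page(dest, src))       // 0 on success, -ENOMEM on failure
        kputs("copy_page failed\n");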
@@ -194,7 +203,7 @@ int memory_init(void)
    // mark all memory as used
    memset(bitmap, 0xff, BITMAP_SIZE);

-   // enable paging and map SMP, VGA, Multiboot modules etc.
+   // enable paging and map Multiboot modules etc.
    ret = page_init();
    if (BUILTIN_EXPECT(ret, 0)) {
        kputs("Failed to initialize paging!\n");
@@ -281,7 +290,7 @@ int memory_init(void)
        atomic_int32_dec(&total_available_pages);
    }

-   //ret = vma_init();
+   ret = vma_init();
    if (BUILTIN_EXPECT(ret, 0)) {
        kprintf("Failed to initialize VMA regions: %d\n", ret);
        return ret;
19 mm/vma.c
@@ -78,18 +78,15 @@ int vma_init(void)
    if (BUILTIN_EXPECT(ret, 0))
        goto out;

    if (mb_info->flags & MULTIBOOT_INFO_MEM_MAP) {
        ret = vma_add(PAGE_CEIL((size_t) mb_info->mmap_addr),
            PAGE_FLOOR((size_t) mb_info->mmap_addr + mb_info->mmap_length),
            VMA_READ|VMA_CACHEABLE);
    }

    if (mb_info->flags & MULTIBOOT_INFO_MODS) {
        multiboot_module_t* mmodule = (multiboot_module_t*) ((size_t) mb_info->mods_addr);

        ret = vma_add(PAGE_CEIL((size_t) mb_info->mods_addr),
-           PAGE_FLOOR((size_t) mb_info->mods_addr + mb_info->mods_count*sizeof(multiboot_module_t)),
-           VMA_READ|VMA_CACHEABLE);
+           PAGE_FLOOR((size_t) mb_info->mods_addr + mb_info->mods_count*sizeof(multiboot_module_t)),
+           VMA_READ|VMA_CACHEABLE);

        //TODO: Why do we get error code -22 (-EINVAL);
        ret = 0; // TODO: Remove workaround

        int i;
        for(i=0; i<mb_info->mods_count; i++) {
@@ -112,7 +109,7 @@ size_t vma_alloc(size_t size, uint32_t flags)
    spinlock_t* lock;
    vma_t** list;

-   kprintf("vma_alloc: size = %#lx, flags = %#x\n", size, flags); // TODO: remove
+   //kprintf("vma_alloc: size = %#lx, flags = %#x\n", size, flags);

    size_t base, limit; // boundaries for search
    size_t start, end;  // boundaries of free gaps
@@ -186,7 +183,7 @@ int vma_free(size_t start, size_t end)
    vma_t* vma;
    vma_t** list = NULL;

-   kprintf("vma_free: start = %#lx, end = %#lx\n", start, end); // TODO: remove
+   //kprintf("vma_free: start = %#lx, end = %#lx\n", start, end);

    if (BUILTIN_EXPECT(start >= end, 0))
        return -EINVAL;
@@ -278,7 +275,7 @@ int vma_add(size_t start, size_t end, uint32_t flags)
        return -EINVAL;
    }

-   kprintf("vma_add: start = %#lx, end = %#lx, flags = %#x\n", start, end, flags); // TODO: remove
+   //kprintf("vma_add: start = %#lx, end = %#lx, flags = %#x\n", start, end, flags);

    spinlock_lock(lock);
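For orientation, the VMA layer these hunks quiet down is the bookkeeping twin of page_map(): vma_alloc() finds a free range, vma_add() registers one, vma_free() forgets it; none of them touch the page tables themselves. A hypothetical pairing of the two layers:

    /* Reserve a free 4-page range, back it with frames, release both again: */
    size_t phyaddr = get_pages(4);
    size_t viraddr = vma_alloc(4*PAGE_SIZE, VMA_HEAP);  // find a free 4-page gap

    if (viraddr && phyaddr) {
        page_map(viraddr, phyaddr, 4, PG_RW|PG_GLOBAL); // back the reservation
        /* ... */
        page_unmap(viraddr, 4);
        vma_free(viraddr, viraddr + 4*PAGE_SIZE);       // drop the reservation
        put_pages(phyaddr, 4);
    }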