2010-12-10 06:16:58 +00:00
|
|
|
/*
|
2012-05-29 20:47:45 +02:00
|
|
|
* Copyright 2012 Stefan Lankes, Chair for Operating Systems,
|
2010-12-10 06:16:58 +00:00
|
|
|
* RWTH Aachen University
|
|
|
|
*
|
|
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
* you may not use this file except in compliance with the License.
|
|
|
|
* You may obtain a copy of the License at
|
|
|
|
*
|
|
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
*
|
|
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
* See the License for the specific language governing permissions and
|
|
|
|
* limitations under the License.
|
|
|
|
*
|
|
|
|
* This file is part of MetalSVM.
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <metalsvm/stddef.h>
|
|
|
|
#include <metalsvm/stdio.h>
|
|
|
|
#include <metalsvm/stdlib.h>
|
|
|
|
#include <metalsvm/mmu.h>
|
2011-02-24 09:36:05 +01:00
|
|
|
#include <metalsvm/vma.h>
|
2010-12-10 06:16:58 +00:00
|
|
|
#include <metalsvm/string.h>
|
|
|
|
#include <metalsvm/page.h>
|
|
|
|
#include <metalsvm/spinlock.h>
|
|
|
|
#include <metalsvm/processor.h>
|
|
|
|
#include <metalsvm/tasks.h>
|
|
|
|
#include <metalsvm/errno.h>
|
|
|
|
#include <asm/irq.h>
|
|
|
|
#include <asm/multiboot.h>
|
2011-07-18 15:51:26 +02:00
|
|
|
#include <asm/apic.h>
|
2010-12-10 06:16:58 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Virtual Memory Layout of the standard configuration
|
|
|
|
* (1 GB kernel space)
|
|
|
|
*
|
2013-10-11 16:21:53 +02:00
|
|
|
* 0x000000000000 - 0x0000000FFFFF: reserved for IO devices (16MB)
|
|
|
|
* 0x000000100000 - 0x00000DEADFFF: Kernel (size depends on the configuration) (221MB)
|
|
|
|
* 0x00000DEAE000 - 0x00003FFFFFFF: Kernel heap
|
|
|
|
* 0xFF8000000000 - 0xFFFFFFFFFFFF: Paging structures are mapped in this region (1GB)
|
2010-12-10 06:16:58 +00:00
|
|
|
*/
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Note that linker symbols are not variables, they have no memory allocated for
|
|
|
|
* maintaining a value, rather their address is their value.
|
|
|
|
*/
|
|
|
|
extern const void kernel_start;
|
|
|
|
extern const void kernel_end;
|
|
|
|
|
2013-10-11 16:21:53 +02:00
|
|
|
// boot task's page directory and page directory lock
|
|
|
|
extern page_map_t boot_pml4;
|
2011-03-04 13:47:06 +01:00
|
|
|
static spinlock_t kslock = SPINLOCK_INIT;
|
2012-06-10 08:05:24 +02:00
|
|
|
static int paging_enabled = 0;
|
2010-12-10 06:16:58 +00:00
|
|
|
|
2013-10-11 16:21:53 +02:00
|
|
|
/*static page_map_t boot_pml4 = {{[0 ... MAP_ENTRIES-1] = 0}};
|
|
|
|
static page_map_t boot_pdpt = {{[0 ... MAP_ENTRIES-1] = 0}};
|
|
|
|
static page_map_t boot_pgd = {{[0 ... MAP_ENTRIES-1] = 0}};
|
|
|
|
static page_map_t boot_pgt = {{[0 ... MAP_ENTRIES-1] = 0}};*/
|
|
|
|
|
|
|
|
page_map_t* get_boot_page_map(void)
|
2010-12-10 06:16:58 +00:00
|
|
|
{
|
2013-10-11 16:21:53 +02:00
|
|
|
return &boot_pml4;
|
2010-12-10 06:16:58 +00:00
|
|
|
}
|
|
|
|
|
2013-10-11 16:21:53 +02:00
|
|
|
/*
 * Attaches a page map to a new task.
 *
 * TODO: Currently, we support only kernel tasks
 * => all tasks are able to use the same page map (the boot PML4),
 *    so the 'copy' argument is ignored for now.
 *
 * @param task task that receives the page map
 * @param copy unused until user tasks with private address spaces exist
 * @return 0 on success, -EINVAL if paging is disabled or task is NULL
 */
int create_page_map(task_t* task, int copy)
{
	if (BUILTIN_EXPECT(!paging_enabled, 0))
		return -EINVAL;

	// guard against a NULL task, consistent with the !task checks
	// performed by virt_to_phys(), map_region() and vm_alloc()
	if (BUILTIN_EXPECT(!task, 0))
		return -EINVAL;

	task->page_map = get_boot_page_map();

	return 0;
}
|
|
|
|
|
2013-10-11 16:21:53 +02:00
|
|
|
/*
 * Drops the page map of the current task.
 *
 * Page-map teardown is not yet implemented for the 64-bit port: all
 * kernel tasks share the boot PML4, so there is nothing to release.
 * The previous implementation was dead code disabled with "#if 0";
 * it iterated over 1024 32-bit PGD entries and referenced the removed
 * symbol 'boot_pgd', so it could never be re-enabled as-is and has
 * been deleted. A 64-bit version must walk all four paging levels and
 * release user frames via put_page().
 *
 * @return always 0 (nothing to do while page maps are shared)
 */
int drop_page_map(void)
{
	return 0;
}
|
|
|
|
|
2011-02-24 10:15:58 +01:00
|
|
|
/*
 * Translates a virtual address into a physical address by walking the
 * four paging levels (PML4 -> PDPT -> PGD -> PGT) of the current task.
 *
 * @param viraddr virtual address to translate
 * @return the physical address incl. page offset, the address itself
 *         while paging is still disabled, or 0 if unmapped/invalid
 */
size_t virt_to_phys(size_t viraddr)
{
	task_t* curr = per_core(current_task);
	size_t phyaddr = 0;

	// identity mapping as long as paging is switched off
	if (!paging_enabled)
		return viraddr;

	if (BUILTIN_EXPECT(!curr || !curr->page_map, 0))
		return 0;

	spinlock_irqsave_lock(&curr->page_lock);

	// TODO: Currently, we allocate pages only in kernel space.
	// => physical address of each page table equals its virtual address,
	//    so the table pointers below can be dereferenced directly.
	page_map_t* level3 = (page_map_t*) (curr->page_map->entries[(viraddr >> 39) & 0x1FF] & PAGE_MASK);
	if (level3) {
		page_map_t* level2 = (page_map_t*) (level3->entries[(viraddr >> 30) & 0x1FF] & PAGE_MASK);
		if (level2) {
			page_map_t* level1 = (page_map_t*) (level2->entries[(viraddr >> 21) & 0x1FF] & PAGE_MASK);
			if (level1) {
				phyaddr = (size_t) (level1->entries[(viraddr >> 12) & 0x1FF] & PAGE_MASK);
				if (phyaddr)
					phyaddr |= viraddr & 0xFFF;	// re-attach the page offset
			}
		}
	}

	spinlock_irqsave_unlock(&curr->page_lock);

	return phyaddr;
}
|
|
|
|
|
2011-02-24 10:15:58 +01:00
|
|
|
/*
 * Maps npages physical pages starting at phyaddr to the virtual address
 * viraddr (or to a freshly allocated virtual range if viraddr is 0).
 *
 * @param viraddr target virtual address, or 0 to let vm_alloc() choose
 * @param phyaddr first physical page to map
 * @param npages  number of pages to map
 * @param flags   MAP_* flags controlling space, caching and protection
 * @return the virtual start address on success, 0 on failure
 */
size_t map_region(size_t viraddr, size_t phyaddr, uint32_t npages, uint32_t flags)
{
	task_t* task = per_core(current_task);
	page_map_t* pdpt, * pgd, * pgt;
	uint16_t index_pml4, index_pdpt;
	uint16_t index_pgd, index_pgt;
	size_t i, ret;

	if (BUILTIN_EXPECT(!task || !task->page_map, 0))
		return 0;

	// before paging is enabled only identity mappings are possible
	if (BUILTIN_EXPECT(!paging_enabled && (viraddr != phyaddr), 0))
		return 0;

	if (flags & MAP_KERNEL_SPACE)
		spinlock_lock(&kslock);
	else
		spinlock_irqsave_lock(&task->page_lock);

	if (!viraddr) {
		viraddr = vm_alloc(npages, flags);
		if (BUILTIN_EXPECT(!viraddr, 0)) {
			kputs("map_region: found no valid virtual address\n");
			ret = 0;
			goto out;
		}
	}

	ret = viraddr;
	for(i=0; i<npages; i++, viraddr+=PAGE_SIZE, phyaddr+=PAGE_SIZE) {
		index_pml4 = (viraddr >> 39) & 0x1FF;
		index_pdpt = (viraddr >> 30) & 0x1FF;
		index_pgd = (viraddr >> 21) & 0x1FF;
		index_pgt = (viraddr >> 12) & 0x1FF;

		// Currently, we allocate pages only in kernel space.
		// => physical address of the page table is identical of the virtual address
		pdpt = (page_map_t*) (task->page_map->entries[index_pml4] & PAGE_MASK);
		// BUGFIX: this branch used to test the still-uninitialized 'pgt'
		// instead of 'pdpt' (undefined behavior, PDPT never validated)
		if (!pdpt) {
			kputs("map_region: out of memory\n");
			ret = 0;
			goto out;
		}

		pgd = (page_map_t*) (pdpt->entries[index_pdpt] & PAGE_MASK);
		if (!pgd) {
			kputs("map_region: out of memory\n");
			ret = 0;
			goto out;
		}

		pgt = (page_map_t*) (pgd->entries[index_pgd] & PAGE_MASK);
		if (!pgt) {
			kputs("map_region: out of memory\n");
			ret = 0;
			goto out;
		}

		// refuse to silently overwrite an existing mapping
		if (pgt->entries[index_pgt] && !(flags & MAP_REMAP)) {
			kprintf("0x%x is already mapped\n", viraddr);
			ret = 0;
			goto out;
		}

		if (flags & MAP_USER_SPACE)
			pgt->entries[index_pgt] = USER_PAGE|(phyaddr & PAGE_MASK);
		else
			pgt->entries[index_pgt] = KERN_PAGE|(phyaddr & PAGE_MASK);

		if (flags & MAP_NO_CACHE)
			pgt->entries[index_pgt] |= PG_PCD;

		if (flags & MAP_NO_ACCESS)
			pgt->entries[index_pgt] &= ~PG_PRESENT;

		if (flags & MAP_WT)
			pgt->entries[index_pgt] |= PG_PWT;

		if (flags & MAP_USER_SPACE)
			atomic_int32_inc(&task->user_usage);

		if (flags & MAP_NO_EXECUTION)
			pgt->entries[index_pgt] |= PG_XD;

		// invalidate any stale TLB entry for the page just (re)mapped
		tlb_flush_one_page(viraddr);
	}

out:
	if (flags & MAP_KERNEL_SPACE)
		spinlock_unlock(&kslock);
	else
		spinlock_irqsave_unlock(&task->page_lock);

	return ret;
}
|
|
|
|
|
2011-02-24 09:36:05 +01:00
|
|
|
/*
 * Changes the access permissions of all pages in the range [start, end).
 *
 * Not yet ported to 64-bit four-level paging: the previous body was a
 * disabled ("#if 0") 32-bit implementation that walked two-level tables
 * (index via "viraddr >> 22", 1024 entries per table) and therefore can
 * never work on x86_64; it has been removed. A 64-bit port must walk
 * PML4/PDPT/PGD/PGT, update the PG_* bits according to the VMA_* flags
 * and flush the TLB for every modified page.
 *
 * @param start first virtual address of the range
 * @param end   first virtual address behind the range
 * @param flags VMA_* permission flags to apply
 * @return -EINVAL (not implemented for x86_64 yet)
 */
int change_page_permissions(size_t start, size_t end, uint32_t flags)
{
	return -EINVAL;
}
|
|
|
|
|
2010-12-10 06:16:58 +00:00
|
|
|
/*
|
|
|
|
* Use the first fit algorithm to find a valid address range
|
|
|
|
*
|
|
|
|
* TODO: O(n) => bad performance, we need a better approach
|
|
|
|
*/
|
2011-02-24 10:15:58 +01:00
|
|
|
/*
 * Use the first fit algorithm to find a valid virtual address range of
 * npages unmapped pages in kernel or user space.
 *
 * TODO: O(n) => bad performance, we need a better approach
 *
 * @param npages number of consecutive free pages required
 * @param flags  MAP_KERNEL_SPACE selects the kernel region, otherwise
 *               the user region is searched
 * @return start address of the free range, or 0 on failure
 */
size_t vm_alloc(uint32_t npages, uint32_t flags)
{
	task_t* task = per_core(current_task);
	size_t viraddr, i, j, ret = 0;
	size_t start, end;
	page_map_t* pdpt, * pgd, * pgt;
	uint16_t index_pml4, index_pdpt;
	uint16_t index_pgd, index_pgt;

	if (BUILTIN_EXPECT(!task || !task->page_map || !paging_enabled, 0))
		return 0;

	if (flags & MAP_KERNEL_SPACE) {
		// keep a 10-page safety gap behind the kernel image
		start = (((size_t) &kernel_end) + 10*PAGE_SIZE) & PAGE_MASK;
		end = (KERNEL_SPACE - PAGE_SIZE) & PAGE_MASK;
	} else {
		start = KERNEL_SPACE & PAGE_MASK;
		end = PAGE_MASK;
	}

	if (BUILTIN_EXPECT(!npages, 0))
		return 0;

	if (flags & MAP_KERNEL_SPACE)
		spinlock_lock(&kslock);
	else
		spinlock_irqsave_lock(&task->page_lock);

	// viraddr marks the start of the current candidate range,
	// i is the probe address, j counts consecutive free pages
	viraddr = i = start;
	j = 0;
	do {
		// BUGFIX: the table indices must be derived from the probe
		// address 'i', not from 'viraddr'; the old code re-examined
		// only the first page of a candidate range over and over
		index_pml4 = (i >> 39) & 0x1FF;
		index_pdpt = (i >> 30) & 0x1FF;
		index_pgd = (i >> 21) & 0x1FF;
		index_pgt = (i >> 12) & 0x1FF;

		// Currently, we allocate pages only in kernel space.
		// => physical address of the page table is identical of the virtual address
		pdpt = (page_map_t*) (task->page_map->entries[index_pml4] & PAGE_MASK);
		if (!pdpt) {
			// missing PDPT => the whole 512 GB region is unmapped (free)
			i += (size_t)MAP_ENTRIES*MAP_ENTRIES*MAP_ENTRIES*PAGE_SIZE;
			j += MAP_ENTRIES*MAP_ENTRIES*MAP_ENTRIES;
			continue;
		}

		pgd = (page_map_t*) (pdpt->entries[index_pdpt] & PAGE_MASK);
		if (!pgd) {
			// missing PGD => 1 GB region is unmapped (free)
			i += MAP_ENTRIES*MAP_ENTRIES*PAGE_SIZE;
			j += MAP_ENTRIES*MAP_ENTRIES;
			continue;
		}

		pgt = (page_map_t*) (pgd->entries[index_pgd] & PAGE_MASK);
		if (!pgt) {
			// missing PGT => 2 MB region is unmapped (free)
			i += MAP_ENTRIES*PAGE_SIZE;
			j += MAP_ENTRIES;
			continue;
		}

		if (!(pgt->entries[index_pgt])) {
			// page is free, extend the candidate run
			i += PAGE_SIZE;
			j++;
		} else {
			// page is in use => restart search behind it
			j = 0;
			viraddr = i + PAGE_SIZE;
			i = i + PAGE_SIZE;
		}
	} while((j < npages) && (i<=end));

	if ((j >= npages) && (viraddr < end))
		ret = viraddr;

	if (flags & MAP_KERNEL_SPACE)
		spinlock_unlock(&kslock);
	else
		spinlock_irqsave_unlock(&task->page_lock);

	return ret;
}
|
|
|
|
|
2011-03-04 22:44:53 +01:00
|
|
|
/*
 * Clears the PG_PRESENT bit of npages pages starting at viraddr, so the
 * mapping is disabled without freeing the underlying page-table entries.
 *
 * @param viraddr first virtual address to unmap
 * @param npages  number of pages
 * @return 0 on success, -EINVAL on invalid task/paging state
 */
int unmap_region(size_t viraddr, uint32_t npages)
{
	task_t* task = per_core(current_task);
	page_map_t* pdpt, * pgd, * pgt;
	size_t i;
	uint16_t index_pml4, index_pdpt;
	uint16_t index_pgd, index_pgt;

	if (BUILTIN_EXPECT(!task || !task->page_map || !paging_enabled, 0))
		return -EINVAL;

	if (viraddr <= KERNEL_SPACE)
		spinlock_lock(&kslock);
	else
		spinlock_irqsave_lock(&task->page_lock);

	i = 0;
	while(i<npages)
	{
		index_pml4 = (viraddr >> 39) & 0x1FF;
		index_pdpt = (viraddr >> 30) & 0x1FF;
		index_pgd = (viraddr >> 21) & 0x1FF;
		index_pgt = (viraddr >> 12) & 0x1FF;

		// Currently, we allocate pages only in kernel space.
		// => physical address of the page table is identical of the virtual address
		pdpt = (page_map_t*) (task->page_map->entries[index_pml4] & PAGE_MASK);
		if (!pdpt) {
			// missing PDPT => skip the whole 512 GB region
			viraddr += (size_t) MAP_ENTRIES*MAP_ENTRIES*MAP_ENTRIES*PAGE_SIZE;
			i += MAP_ENTRIES*MAP_ENTRIES*MAP_ENTRIES;
			continue;
		}

		pgd = (page_map_t*) (pdpt->entries[index_pdpt] & PAGE_MASK);
		if (!pgd) {
			// missing PGD => skip the 1 GB region
			viraddr += MAP_ENTRIES*MAP_ENTRIES*PAGE_SIZE;
			i += MAP_ENTRIES*MAP_ENTRIES;
			continue;
		}

		pgt = (page_map_t*) (pgd->entries[index_pgd] & PAGE_MASK);
		if (!pgt) {
			// missing PGT => skip the 2 MB region
			viraddr += MAP_ENTRIES*PAGE_SIZE;
			i += MAP_ENTRIES;
			continue;
		}

		if (pgt->entries[index_pgt])
			pgt->entries[index_pgt] &= ~PG_PRESENT;

		// BUGFIX: account and flush the page we just unmapped *before*
		// advancing viraddr; the old code incremented first and thus
		// flushed the TLB entry of the wrong (next) page
		if (viraddr > KERNEL_SPACE)
			atomic_int32_dec(&task->user_usage);

		tlb_flush_one_page(viraddr);

		viraddr += PAGE_SIZE;
		i++;
	}

	if (viraddr <= KERNEL_SPACE)
		spinlock_unlock(&kslock);
	else
		spinlock_irqsave_unlock(&task->page_lock);

	return 0;
}
|
|
|
|
|
2011-02-24 10:15:58 +01:00
|
|
|
/*
 * Releases npages virtual pages starting at viraddr by clearing their
 * page-table entries. Only the mapping is removed; the physical frames
 * are not returned to the frame allocator here.
 *
 * @param viraddr first virtual address to release
 * @param npages  number of pages
 * @return 0 on success, -EINVAL on invalid task/paging state
 */
int vm_free(size_t viraddr, uint32_t npages)
{
	task_t* task = per_core(current_task);
	page_map_t* pdpt, * pgd, * pgt;
	size_t i;
	uint16_t index_pml4, index_pdpt;
	uint16_t index_pgd, index_pgt;

	if (BUILTIN_EXPECT(!task || !task->page_map || !paging_enabled, 0))
		return -EINVAL;

	if (viraddr <= KERNEL_SPACE)
		spinlock_lock(&kslock);
	else
		spinlock_irqsave_lock(&task->page_lock);

	i = 0;
	while(i<npages)
	{
		index_pml4 = (viraddr >> 39) & 0x1FF;
		index_pdpt = (viraddr >> 30) & 0x1FF;
		index_pgd = (viraddr >> 21) & 0x1FF;
		index_pgt = (viraddr >> 12) & 0x1FF;

		// Currently, we allocate pages only in kernel space.
		// => physical address of the page table is identical of the virtual address
		pdpt = (page_map_t*) (task->page_map->entries[index_pml4] & PAGE_MASK);
		if (!pdpt) {
			// missing PDPT => skip the whole 512 GB region
			viraddr += (size_t) MAP_ENTRIES*MAP_ENTRIES*MAP_ENTRIES*PAGE_SIZE;
			i += MAP_ENTRIES*MAP_ENTRIES*MAP_ENTRIES;
			continue;
		}

		pgd = (page_map_t*) (pdpt->entries[index_pdpt] & PAGE_MASK);
		if (!pgd) {
			// missing PGD => skip the 1 GB region
			viraddr += MAP_ENTRIES*MAP_ENTRIES*PAGE_SIZE;
			i += MAP_ENTRIES*MAP_ENTRIES;
			continue;
		}

		pgt = (page_map_t*) (pgd->entries[index_pgd] & PAGE_MASK);
		if (!pgt) {
			// missing PGT => skip the 2 MB region
			viraddr += MAP_ENTRIES*PAGE_SIZE;
			i += MAP_ENTRIES;
			continue;
		}

		if (pgt->entries[index_pgt])
			pgt->entries[index_pgt] = 0;

		// BUGFIX: flush the TLB entry of the page that was just released
		// *before* advancing viraddr; the old code incremented first and
		// therefore invalidated the wrong (next) page
		tlb_flush_one_page(viraddr);

		viraddr += PAGE_SIZE;
		i++;
	}

	if (viraddr <= KERNEL_SPACE)
		spinlock_unlock(&kslock);
	else
		spinlock_irqsave_unlock(&task->page_lock);

	return 0;
}
|
|
|
|
|
|
|
|
/*
 * Default page-fault handler (installed on IRQ 14 by arch_paging_init).
 *
 * Demand paging for the user heap is currently disabled (see the
 * "#if 0" block below), so every page fault dumps the faulting address
 * and the saved register state, then halts the system.
 *
 * @param s register state pushed by the interrupt entry stub
 */
static void pagefault_handler(struct state *s)
{
	task_t* task = per_core(current_task);
	//page_map_t* pgd = task->page_map;
	//page_map_t* pgt = NULL;
	// CR2 holds the linear address that caused the fault
	size_t viraddr = read_cr2();
	//size_t phyaddr;

#if 0
	// disabled demand-paging path: lazily back heap pages of user
	// tasks with zeroed physical frames on first access
	if ((viraddr >= task->start_heap) && (viraddr <= task->end_heap) && (viraddr > KERNEL_SPACE)) {
		viraddr = viraddr & PAGE_MASK;

		phyaddr = get_page();
		if (BUILTIN_EXPECT(!phyaddr, 0))
			goto default_handler;

		if (map_region(viraddr, phyaddr, 1, MAP_USER_SPACE) == viraddr) {
			memset((void*) viraddr, 0x00, PAGE_SIZE);
			return;
		}

		kprintf("Could not map 0x%x at 0x%x\n", phyaddr, viraddr);
		put_page(phyaddr);
	}
#endif

//default_handler:
	kprintf("PAGE FAULT: Task %u got page fault at %p (irq %llu, cs:rip 0x%llx:0x%llx)\n", task->id, viraddr, s->int_no, s->cs, s->rip);
	kprintf("Register state: rax = 0x%llx, rbx = 0x%llx, rcx = 0x%llx, rdx = 0x%llx, rdi = 0x%llx, rsi = 0x%llx, rbp = 0x%llx, rsp = 0x%llx\n",
		s->rax, s->rbx, s->rcx, s->rdx, s->rdi, s->rsi, s->rbp, s->rsp);

	// hang here forever; the statements below are unreachable by design
	while(1);
	irq_enable();
	abort();
}
|
|
|
|
|
|
|
|
/*
 * Initializes the paging subsystem: installs the page-fault handler,
 * reserves the SMP boot page (on multi-core builds), maps the multiboot
 * modules, and finally marks paging as usable for the rest of the kernel.
 *
 * @return 0 on success, -ENOMEM if a required mapping fails
 */
int arch_paging_init(void)
{
	uint32_t i, npages;

	// uninstall default handler and install our own
	irq_uninstall_handler(14);
	irq_install_handler(14, pagefault_handler);

	// kernel is already mapped into the kernel space (see entry64.asm)
	// this includes .data, .bss, .text, video memory and the multiboot structure

#if MAX_CORES > 1
	// Reserve page for smp boot code
	if (!map_region(SMP_SETUP_ADDR, SMP_SETUP_ADDR, 1, MAP_KERNEL_SPACE|MAP_NO_CACHE)) {
		kputs("could not reserve page for smp boot code\n");
		return -ENOMEM;
	}
#endif

#ifdef CONFIG_MULTIBOOT
#if 0
	/*
	 * Map reserved memory regions into the kernel space
	 * (currently disabled)
	 */
	if (mb_info && (mb_info->flags & MULTIBOOT_INFO_MEM_MAP)) {
		multiboot_memory_map_t* mmap = (multiboot_memory_map_t*) mb_info->mmap_addr;
		multiboot_memory_map_t* mmap_end = (void*) ((size_t) mb_info->mmap_addr + mb_info->mmap_length);

		while (mmap < mmap_end) {
			if (mmap->type != MULTIBOOT_MEMORY_AVAILABLE) {
				// round the region length up to whole pages
				npages = mmap->len / PAGE_SIZE;
				if ((mmap->addr+mmap->len) % PAGE_SIZE)
					npages++;
				map_region(mmap->addr, mmap->addr, npages, MAP_KERNEL_SPACE|MAP_NO_CACHE);
			}
			mmap++;
		}
	}
#endif

	/*
	 * Modules like the init ram disk are already loaded.
	 * Therefore, we map these modules into the kernel space.
	 */
	if (mb_info && (mb_info->flags & MULTIBOOT_INFO_MODS)) {
		multiboot_module_t* mmodule = (multiboot_module_t*) ((size_t) mb_info->mods_addr);

		// first map the module descriptor array itself (rounded up to pages)
		npages = mb_info->mods_count * sizeof(multiboot_module_t) >> PAGE_SHIFT;
		if (mb_info->mods_count * sizeof(multiboot_module_t) & (PAGE_SIZE-1))
			npages++;
		map_region((size_t) (mb_info->mods_addr), (size_t) (mb_info->mods_addr), npages, MAP_REMAP|MAP_KERNEL_SPACE);

		for(i=0; i<mb_info->mods_count; i++, mmodule++) {
			// map physical address to the same virtual address
			npages = (mmodule->mod_end - mmodule->mod_start) >> PAGE_SHIFT;
			if (mmodule->mod_end & (PAGE_SIZE-1))
				npages++;
			kprintf("Map module %s at 0x%x (%u pages)\n", (char*) mmodule->cmdline, mmodule->mod_start, npages);
			map_region((size_t) (mmodule->mod_start), (size_t) (mmodule->mod_start), npages, MAP_REMAP|MAP_KERNEL_SPACE);
		}
	}
#endif

	/* signalize that we are able to use paging */
	paging_enabled = 1;

	/*
	 * we turned on paging
	 * => now, we are able to register our task
	 */
	register_task();

	// APIC registers into the kernel address space
	map_apic();

	return 0;
}
|