refactored paging code to be more universal and suitable for 64bit paging
parent e290d41149
commit 9b47b3ef45

7 changed files with 266 additions and 289 deletions
@@ -84,33 +84,22 @@
 /// This is a whole set of flags (PRESENT,RW,USER) for userspace pages
 #define USER_PAGE (PG_PRESENT|PG_RW|PG_USER)
 
-#if __SIZEOF_POINTER__ == 4
-#define PGT_ENTRIES 1024
-#elif __SIZEOF_POINTER__ == 8
-#define PGT_ENTRIES 512
+#ifdef CONFIG_X86_32
+/// On a 32-bit system, each page map structure consists of 1024 entries (= 2^10)
+#define MAP_ENTRIES 1024
+#elif defined(CONFIG_X86_64)
+/// On a 64-bit system, each page map structure consists of 512 entries (= 2^9)
+#define MAP_ENTRIES 512
 #endif
 
-/** @brief Page table structure
- *
- * This structure keeps page table entries.\n
- * On a 32bit system, a page table consists normally of 1024 entries.
- */
-typedef struct page_table
-{
-    /// Page table entries are unsigned 32bit integers.
-    size_t entries[PGT_ENTRIES];
-} page_table_t __attribute__ ((aligned (4096)));
-
-/** @brief Page directory structure
+/** @brief General page map structure
  *
- * This structure keeps page directory entries.\
- * On a 32bit system, a page directory consists normally of 1024 entries.
+ * This page map structure is a general type for all indirection levels.\n
+ * All page map levels contain the same number of entries.
  */
-typedef struct page_dir
-{
-    /// Page dir entries are unsigned 32bit integers.
-    size_t entries[PGT_ENTRIES];
-} page_dir_t __attribute__ ((aligned (4096)));
+typedef struct page_map {
+    size_t entries[MAP_ENTRIES];
+} __attribute__ ((aligned (4096))) page_map_t;
 
 /** @brief Converts a virtual address to a physical
  *
@@ -192,7 +181,7 @@ int arch_paging_init(void);
  *
  * @return Returns the address of the boot task's page dir array.
  */
-page_dir_t* get_boot_pgd(void);
+page_map_t* get_boot_page_map(void);
 
 /** @brief Setup a new page directory for a new user-level task
  *
@@ -203,18 +192,18 @@ page_dir_t* get_boot_pgd(void);
  * - counter of allocated page tables
  * - -ENOMEM (-12) on failure
  */
-int create_pgd(task_t* task, int copy);
+int create_page_map(task_t* task, int copy);
 
-/** @brief Delete page directory and its page tables
+/** @brief Delete all page map structures of the current task
  *
- * Puts page tables and page directory back to buffer and
- * sets the task's page directory pointer to NULL
+ * Puts PML4, PDPT, PGD, PGT tables back to buffer and
+ * sets the task's page map pointer to NULL
  *
  * @return
  * - 0 on success
  * - -EINVAL (-22) on failure (in case PGD is still the boot-pgd).
  */
-int drop_pgd(void);
+int drop_page_map(void);
 
 /** @brief Change the page permission in the page tables of the current task
  *
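For readers skimming the header change: the 32-bit page directory/page table pair and all four 64-bit levels now share one page-sized type. A minimal standalone sketch of what the new definitions amount to (the CONFIG_X86_32 switch and names are taken from the diff; the _Static_assert is an illustrative addition, not part of the commit, and assumes a 64-bit host when CONFIG_X86_32 is undefined):

    #include <stddef.h>

    #define PAGE_SIZE 4096

    #ifdef CONFIG_X86_32
    #define MAP_ENTRIES 1024   /* 2^10 entries of 4 bytes => one 4 KiB frame */
    #else
    #define MAP_ENTRIES 512    /* 2^9 entries of 8 bytes => one 4 KiB frame  */
    #endif

    /* One structure serves every indirection level (PGD/PGT on 32 bit,
     * PML4/PDPT/PGD/PGT on 64 bit), because all levels share the layout. */
    typedef struct page_map {
        size_t entries[MAP_ENTRIES];
    } __attribute__((aligned(PAGE_SIZE))) page_map_t;

    /* sanity check: a page map always fills exactly one page frame */
    _Static_assert(sizeof(page_map_t) == PAGE_SIZE, "page map must be page-sized");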
@@ -86,10 +86,10 @@ startup_stack:
 
 SECTION .data
 ; create default page tables for the 64bit kernel
-global boot_pgd ; aka PML4
+global boot_pml4
 ALIGN 4096 ; of course, the page tables have to be page aligned
 NOPTS equ 512
-boot_pgd times 512 DQ 0
+boot_pml4 times 512 DQ 0
 boot_pdpt times 512 DQ 0
 boot_pd times 512 DQ 0
 boot_pt times (NOPTS*512) DQ 0
@@ -113,7 +113,7 @@ smp_entry:
     mov cr4, eax
 
     ; initialize page table
-    mov edi, boot_pgd
+    mov edi, boot_pml4
     mov cr3, edi
 
     ; we need to enable PAE modus
@@ -211,7 +211,7 @@ stublet:
     jz Linvalid ; They aren't, there is no long mode.
 
     ; initialize page table
-    mov edi, boot_pgd
+    mov edi, boot_pml4
     mov cr3, edi
 
    ; So lets make PML4T[0] point to the PDPT and so on:
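A quick sanity check on the reserved boot tables above: boot_pt reserves NOPTS = 512 page tables, each holding 512 8-byte entries that map 4 KiB apiece, i.e. 1 GiB in total. A throwaway C check of that arithmetic (constants copied from the assembly; the program itself is only illustrative):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        const uint64_t NOPTS = 512;            /* boot_pt reserves NOPTS tables */
        const uint64_t ENTRIES_PER_PT = 512;   /* 512 8-byte entries per table  */
        const uint64_t PAGE_SIZE = 4096;

        /* each PT maps 512 * 4 KiB = 2 MiB; 512 PTs map 1 GiB */
        uint64_t mapped = NOPTS * ENTRIES_PER_PT * PAGE_SIZE;
        printf("boot page tables cover %llu MiB\n",
               (unsigned long long)(mapped >> 20));   /* prints 1024 */
        return 0;
    }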
@@ -50,7 +50,7 @@ size_t* get_current_stack(void)
 #endif
 
     // use new page table
-    write_cr3(virt_to_phys((size_t)curr_task->pgd));
+    write_cr3(virt_to_phys((size_t)curr_task->page_map));
 
     return curr_task->last_stack_pointer;
 }
@@ -46,7 +46,7 @@
 * 0x00100000 - 0x0DEADFFF: Kernel (size depends on the configuration) (221MB)
 * 0x0DEAE000 - 0x3FFFEFFF: Kernel heap (801MB)
 * 0x3FFFF000 - 0x3FFFFFFF: Page Tables are mapped in this region (4KB)
-*              (The last 256 entries belongs to kernel space)
+*              (The first 256 entries belongs to kernel space)
 */
 
 /*
@@ -57,13 +57,14 @@ extern const void kernel_start;
 extern const void kernel_end;
 
 // boot task's page directory and page directory lock
-static page_dir_t boot_pgd = {{[0 ... PGT_ENTRIES-1] = 0}};
-static page_table_t pgt_container = {{[0 ... PGT_ENTRIES-1] = 0}};
-static page_table_t boot_pgt[KERNEL_SPACE/(1024*PAGE_SIZE)];
+static page_map_t boot_pgd = {{[0 ... MAP_ENTRIES-1] = 0}};
+static page_map_t boot_pgt[KERNEL_SPACE/(1024*PAGE_SIZE)];
+static page_map_t pgt_container = {{[0 ... MAP_ENTRIES-1] = 0}};
 
 static spinlock_t kslock = SPINLOCK_INIT;
 static int paging_enabled = 0;
 
-page_dir_t* get_boot_pgd(void)
+page_map_t* get_boot_page_map(void)
 {
     return &boot_pgd;
 }
@@ -74,23 +75,23 @@ page_dir_t* get_boot_pgd(void)
 * No PGD locking is needed because only create_pgd use this function and holds already the
 * PGD lock.
 */
-inline static size_t copy_page_table(task_t* task, uint32_t pgd_index, page_table_t* pgt, int* counter)
+inline static size_t copy_page_table(task_t* task, uint32_t pgd_index, page_map_t* pgt, int* counter)
 {
     uint32_t i;
-    page_table_t* new_pgt;
+    page_map_t* new_pgt;
     size_t phyaddr;
 
     if (BUILTIN_EXPECT(!pgt, 0))
         return 0;
 
-    new_pgt = kmalloc(sizeof(page_table_t));
+    new_pgt = kmalloc(sizeof(page_map_t));
     if (!new_pgt)
         return 0;
-    memset(new_pgt, 0x00, sizeof(page_table_t));
+    memset(new_pgt, 0x00, sizeof(page_map_t));
     if (counter)
         (*counter)++;
 
-    for(i=0; i<PGT_ENTRIES; i++) {
+    for(i=0; i<MAP_ENTRIES; i++) {
         if (pgt->entries[i] & PAGE_MASK) {
             if (!(pgt->entries[i] & PG_USER)) {
                 // Kernel page => copy only page entries
@@ -117,11 +118,11 @@ inline static size_t copy_page_table(task_t* task, uint32_t pgd_index, page_tabl
     return phyaddr;
 }
 
-int create_pgd(task_t* task, int copy)
+int create_page_map(task_t* task, int copy)
 {
-    page_dir_t* pgd;
-    page_table_t* pgt;
-    page_table_t* pgt_container;
+    page_map_t* pgd;
+    page_map_t* pgt;
+    page_map_t* pgt_container;
     uint32_t i;
     uint32_t index1, index2;
     size_t viraddr, phyaddr;
@@ -133,25 +134,25 @@ int create_pgd(task_t* task, int copy)
 
     // we already know the virtual address of the "page table container"
     // (see file header)
-    pgt_container = (page_table_t*) ((KERNEL_SPACE - PAGE_SIZE) & PAGE_MASK);
+    pgt_container = (page_map_t*) ((KERNEL_SPACE - PAGE_SIZE) & PAGE_MASK);
 
     // create new page directory for the new task
-    pgd = kmalloc(sizeof(page_dir_t));
+    pgd = kmalloc(sizeof(page_map_t));
     if (!pgd)
         return -ENOMEM;
-    memset(pgd, 0x00, sizeof(page_dir_t));
+    memset(pgd, 0x00, sizeof(page_map_t));
 
     // create a new "page table container" for the new task
-    pgt = kmalloc(sizeof(page_table_t));
+    pgt = kmalloc(sizeof(page_map_t));
     if (!pgt) {
-        kfree(pgd, sizeof(page_dir_t));
+        kfree(pgd, sizeof(page_map_t));
         return -ENOMEM;
     }
-    memset(pgt, 0x00, sizeof(page_table_t));
+    memset(pgt, 0x00, sizeof(page_map_t));
 
     spinlock_lock(&kslock);
 
-    for(i=0; i<PGT_ENTRIES; i++) {
+    for(i=0; i<MAP_ENTRIES; i++) {
         pgd->entries[i] = boot_pgd.entries[i];
         // only kernel entries will be copied
         if (pgd->entries[i] && !(pgd->entries[i] & PG_USER))
@@ -169,36 +170,33 @@ int create_pgd(task_t* task, int copy)
     pgd->entries[index1] = ((size_t) virt_to_phys((size_t) pgt) & PAGE_MASK)|KERN_TABLE;
     pgt->entries[index2] = ((size_t) virt_to_phys((size_t) pgt) & PAGE_MASK)|KERN_PAGE;
 
-    task->pgd = pgd;
+    task->page_map = pgd;
 
     if (copy) {
-        spinlock_irqsave_lock(&curr_task->pgd_lock);
+        spinlock_irqsave_lock(&curr_task->page_lock);
 
         for (i=KERNEL_SPACE/(1024*PAGE_SIZE); i<1024; i++) {
-            if (!(curr_task->pgd->entries[i]))
+            if (!(curr_task->page_map->entries[i]))
                 continue;
-            if (!(curr_task->pgd->entries[i] & PG_USER))
+            if (!(curr_task->page_map->entries[i] & PG_USER))
                 continue;
 
-            phyaddr = copy_page_table(task, i, (page_table_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + i*PAGE_SIZE) & PAGE_MASK), &counter);
+            phyaddr = copy_page_table(task, i, (page_map_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + i*PAGE_SIZE) & PAGE_MASK), &counter);
             if (phyaddr) {
-                pgd->entries[i] = (phyaddr & PAGE_MASK) | (curr_task->pgd->entries[i] & 0xFFF);
+                pgd->entries[i] = (phyaddr & PAGE_MASK) | (curr_task->page_map->entries[i] & 0xFFF);
                 pgt->entries[i] = (phyaddr & PAGE_MASK) | KERN_PAGE;
             }
         }
 
-        spinlock_irqsave_unlock(&curr_task->pgd_lock);
+        spinlock_irqsave_unlock(&curr_task->page_lock);
     }
 
     return counter;
 }
 
 /*
 * drops all page frames and the PGD of a user task
 */
-int drop_pgd(void)
+int drop_page_map(void)
 {
-    page_dir_t* pgd = per_core(current_task)->pgd;
+    page_map_t* pgd = per_core(current_task)->page_map;
     size_t phy_pgd = virt_to_phys((size_t) pgd);
     task_t* task = per_core(current_task);
     uint32_t i;
@@ -206,9 +204,9 @@ int drop_pgd(void)
     if (BUILTIN_EXPECT(pgd == &boot_pgd, 0))
         return -EINVAL;
 
-    spinlock_irqsave_lock(&task->pgd_lock);
+    spinlock_irqsave_lock(&task->page_lock);
 
-    for(i=0; i<PGT_ENTRIES; i++) {
+    for(i=0; i<MAP_ENTRIES; i++) {
         if (pgd->entries[i] & PG_USER) {
             put_page(pgd->entries[i] & PAGE_MASK);
             pgd->entries[i] = 0;
@@ -218,9 +216,9 @@ int drop_pgd(void)
     // freeing the page directory
     put_page(phy_pgd);
 
-    task->pgd = NULL;
+    task->page_map = NULL;
 
-    spinlock_irqsave_unlock(&task->pgd_lock);
+    spinlock_irqsave_unlock(&task->page_lock);
 
     return 0;
 }
@@ -229,24 +227,24 @@ size_t virt_to_phys(size_t viraddr)
 {
     task_t* task = per_core(current_task);
     uint32_t index1, index2;
-    page_table_t* pgt;
+    page_map_t* pgt;
     size_t ret = 0;
 
     if (!paging_enabled)
         return viraddr;
 
-    if (BUILTIN_EXPECT(!task || !task->pgd, 0))
+    if (BUILTIN_EXPECT(!task || !task->page_map, 0))
         return 0;
 
-    spinlock_irqsave_lock(&task->pgd_lock);
+    spinlock_irqsave_lock(&task->page_lock);
 
     index1 = viraddr >> 22;
     index2 = (viraddr >> 12) & 0x3FF;
 
-    if (!(task->pgd->entries[index1] & PAGE_MASK))
+    if (!(task->page_map->entries[index1] & PAGE_MASK))
         goto out;
 
-    pgt = (page_table_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE) & PAGE_MASK);
+    pgt = (page_map_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE) & PAGE_MASK);
     if (!pgt || !(pgt->entries[index2]))
         goto out;
 
@@ -255,7 +253,7 @@ size_t virt_to_phys(size_t viraddr)
 out:
     //kprintf("vir %p to phy %p\n", viraddr, ret);
 
-    spinlock_irqsave_unlock(&task->pgd_lock);
+    spinlock_irqsave_unlock(&task->page_lock);
 
     return ret;
 }
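The 32-bit lookup above hinges on splitting a virtual address into a 10-bit directory index, a 10-bit table index and a 12-bit page offset. A standalone sketch of that split (the example address is arbitrary; this is the arithmetic the kernel code relies on, not the kernel code itself):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t viraddr = 0x0DEAE123;              /* any example address       */
        uint32_t index1 = viraddr >> 22;            /* page directory index      */
        uint32_t index2 = (viraddr >> 12) & 0x3FF;  /* page table index (10 bit) */
        uint32_t offset = viraddr & 0xFFF;          /* offset inside the page    */

        /* 10 + 10 + 12 bits cover the full 32-bit address */
        printf("pgd[%u] -> pgt[%u] + 0x%03x\n", index1, index2, offset);
        return 0;
    }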
@@ -263,11 +261,11 @@ out:
 size_t map_region(size_t viraddr, size_t phyaddr, uint32_t npages, uint32_t flags)
 {
     task_t* task = per_core(current_task);
-    page_table_t* pgt;
+    page_map_t* pgt;
     size_t index, i;
     size_t ret;
 
-    if (BUILTIN_EXPECT(!task || !task->pgd, 0))
+    if (BUILTIN_EXPECT(!task || !task->page_map, 0))
         return 0;
 
     if (BUILTIN_EXPECT(!paging_enabled && (viraddr != phyaddr), 0))
@@ -276,7 +274,7 @@ size_t map_region(size_t viraddr, size_t phyaddr, uint32_t npages, uint32_t flag
     if (flags & MAP_KERNEL_SPACE)
         spinlock_lock(&kslock);
     else
-        spinlock_irqsave_lock(&task->pgd_lock);
+        spinlock_irqsave_lock(&task->page_lock);
 
     if (!viraddr) {
         viraddr = vm_alloc(npages, flags);
@@ -292,10 +290,10 @@ size_t map_region(size_t viraddr, size_t phyaddr, uint32_t npages, uint32_t flag
     for(i=0; i<npages; i++, viraddr+=PAGE_SIZE, phyaddr+=PAGE_SIZE) {
         index = viraddr >> 22;
 
-        if (!(task->pgd->entries[index])) {
-            page_table_t* pgt_container;
+        if (!(task->page_map->entries[index])) {
+            page_map_t* pgt_container;
 
-            pgt = (page_table_t*) get_pages(1);
+            pgt = (page_map_t*) get_page();
             if (BUILTIN_EXPECT(!pgt, 0)) {
                 kputs("map_address: out of memory\n");
                 ret = 0;
@@ -304,17 +302,17 @@ size_t map_region(size_t viraddr, size_t phyaddr, uint32_t npages, uint32_t flag
 
             // set the new page table into the directory
             if (flags & MAP_USER_SPACE)
-                task->pgd->entries[index] = (uint32_t)pgt|USER_TABLE;
+                task->page_map->entries[index] = (uint32_t)pgt|USER_TABLE;
             else
-                task->pgd->entries[index] = (uint32_t)pgt|KERN_TABLE;
+                task->page_map->entries[index] = (uint32_t)pgt|KERN_TABLE;
 
             // if paging is already enabled, we need to use the virtual address
             if (paging_enabled)
                 // we already know the virtual address of the "page table container"
                 // (see file header)
-                pgt_container = (page_table_t*) ((KERNEL_SPACE - PAGE_SIZE) & PAGE_MASK);
+                pgt_container = (page_map_t*) ((KERNEL_SPACE - PAGE_SIZE) & PAGE_MASK);
             else
-                pgt_container = (page_table_t*) (task->pgd->entries[(KERNEL_SPACE - PAGE_SIZE) >> 22] & PAGE_MASK);
+                pgt_container = (page_map_t*) (task->page_map->entries[(KERNEL_SPACE - PAGE_SIZE) >> 22] & PAGE_MASK);
 
             if (BUILTIN_EXPECT(!pgt_container, 0)) {
                 kputs("map_address: internal error\n");
@@ -330,11 +328,11 @@ size_t map_region(size_t viraddr, size_t phyaddr, uint32_t npages, uint32_t flag
                 memset((void*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index*PAGE_SIZE) & PAGE_MASK), 0x00, PAGE_SIZE);
             else
                 memset(pgt, 0x00, PAGE_SIZE);
-        } else pgt = (page_table_t*) (task->pgd->entries[index] & PAGE_MASK);
+        } else pgt = (page_map_t*) (task->page_map->entries[index] & PAGE_MASK);
 
         /* convert physical address to virtual */
         if (paging_enabled)
-            pgt = (page_table_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index*PAGE_SIZE) & PAGE_MASK);
+            pgt = (page_map_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index*PAGE_SIZE) & PAGE_MASK);
 
         index = (viraddr >> 12) & 0x3FF;
         if (pgt->entries[index] && !(flags & MAP_REMAP)) {
@@ -382,7 +380,7 @@ out:
     if (flags & MAP_KERNEL_SPACE)
         spinlock_unlock(&kslock);
     else
-        spinlock_irqsave_unlock(&task->pgd_lock);
+        spinlock_irqsave_unlock(&task->page_lock);
 
     return ret;
 }
@@ -392,18 +390,18 @@ int change_page_permissions(size_t start, size_t end, uint32_t flags)
     uint32_t index1, index2, newflags;
     size_t viraddr = start & 0xFFFFF000;
     size_t phyaddr;
-    page_table_t* pgt;
-    page_dir_t* pgd;
+    page_map_t* pgt;
+    page_map_t* pgd;
     task_t* task = per_core(current_task);
 
     if (BUILTIN_EXPECT(!paging_enabled, 0))
         return -EINVAL;
 
-    pgd = per_core(current_task)->pgd;
+    pgd = per_core(current_task)->page_map;
     if (BUILTIN_EXPECT(!pgd, 0))
         return -EINVAL;
 
-    spinlock_irqsave_lock(&task->pgd_lock);
+    spinlock_irqsave_lock(&task->page_lock);
 
     while (viraddr < end)
     {
@@ -411,7 +409,7 @@ int change_page_permissions(size_t start, size_t end, uint32_t flags)
         index2 = (viraddr >> 12) & 0x3FF;
 
         while ((viraddr < end) && (index2 < 1024)) {
-            pgt = (page_table_t*) (page_table_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE) & PAGE_MASK);
+            pgt = (page_map_t*) (page_map_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE) & PAGE_MASK);
             if (pgt && pgt->entries[index2]) {
                 phyaddr = pgt->entries[index2] & PAGE_MASK;
                 newflags = pgt->entries[index2] & 0xFFF; // get old flags
@@ -448,7 +446,7 @@ int change_page_permissions(size_t start, size_t end, uint32_t flags)
         }
     }
 
-    spinlock_irqsave_unlock(&task->pgd_lock);
+    spinlock_irqsave_unlock(&task->page_lock);
 
     return 0;
 }
@@ -464,9 +462,9 @@ size_t vm_alloc(uint32_t npages, uint32_t flags)
     uint32_t index1, index2, j;
     size_t viraddr, i, ret = 0;
     size_t start, end;
-    page_table_t* pgt;
+    page_map_t* pgt;
 
-    if (BUILTIN_EXPECT(!task || !task->pgd || !paging_enabled, 0))
+    if (BUILTIN_EXPECT(!task || !task->page_map || !paging_enabled, 0))
         return 0;
 
     if (flags & MAP_KERNEL_SPACE) {
@@ -483,7 +481,7 @@ size_t vm_alloc(uint32_t npages, uint32_t flags)
     if (flags & MAP_KERNEL_SPACE)
         spinlock_lock(&kslock);
     else
-        spinlock_irqsave_lock(&task->pgd_lock);
+        spinlock_irqsave_lock(&task->page_lock);
 
     viraddr = i = start;
     j = 0;
@@ -491,7 +489,7 @@ size_t vm_alloc(uint32_t npages, uint32_t flags)
         index1 = i >> 22;
         index2 = (i >> 12) & 0x3FF;
 
-        pgt = (page_table_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE) & PAGE_MASK);
+        pgt = (page_map_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE) & PAGE_MASK);
         if (!pgt || !(pgt->entries[index2])) {
             i+=PAGE_SIZE;
             j++;
@@ -509,7 +507,7 @@ size_t vm_alloc(uint32_t npages, uint32_t flags)
     if (flags & MAP_KERNEL_SPACE)
         spinlock_unlock(&kslock);
     else
-        spinlock_irqsave_unlock(&task->pgd_lock);
+        spinlock_irqsave_unlock(&task->page_lock);
 
     return ret;
 }
@@ -519,22 +517,22 @@ int unmap_region(size_t viraddr, uint32_t npages)
     task_t* task = per_core(current_task);
     uint32_t i;
     uint32_t index1, index2;
-    page_table_t* pgt;
+    page_map_t* pgt;
 
-    if (BUILTIN_EXPECT(!task || !task->pgd || !paging_enabled, 0))
+    if (BUILTIN_EXPECT(!task || !task->page_map || !paging_enabled, 0))
         return -EINVAL;
 
     if (viraddr <= KERNEL_SPACE)
         spinlock_lock(&kslock);
     else
-        spinlock_irqsave_lock(&task->pgd_lock);
+        spinlock_irqsave_lock(&task->page_lock);
 
     for(i=0; i<npages; i++, viraddr+=PAGE_SIZE)
     {
         index1 = viraddr >> 22;
         index2 = (viraddr >> 12) & 0x3FF;
 
-        pgt = (page_table_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE) & PAGE_MASK);
+        pgt = (page_map_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE) & PAGE_MASK);
         if (!pgt)
             continue;
         pgt->entries[index2] &= ~PG_PRESENT;
@@ -548,7 +546,7 @@ int unmap_region(size_t viraddr, uint32_t npages)
     if (viraddr <= KERNEL_SPACE)
         spinlock_unlock(&kslock);
     else
-        spinlock_irqsave_unlock(&task->pgd_lock);
+        spinlock_irqsave_unlock(&task->page_lock);
 
     return 0;
 }
@@ -558,22 +556,22 @@ int vm_free(size_t viraddr, uint32_t npages)
     task_t* task = per_core(current_task);
     uint32_t i;
     uint32_t index1, index2;
-    page_table_t* pgt;
+    page_map_t* pgt;
 
-    if (BUILTIN_EXPECT(!task || !task->pgd || !paging_enabled, 0))
+    if (BUILTIN_EXPECT(!task || !task->page_map || !paging_enabled, 0))
         return -EINVAL;
 
     if (viraddr <= KERNEL_SPACE)
         spinlock_lock(&kslock);
     else
-        spinlock_irqsave_lock(&task->pgd_lock);
+        spinlock_irqsave_lock(&task->page_lock);
 
     for(i=0; i<npages; i++, viraddr+=PAGE_SIZE)
     {
         index1 = viraddr >> 22;
         index2 = (viraddr >> 12) & 0x3FF;
 
-        pgt = (page_table_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE) & PAGE_MASK);
+        pgt = (page_map_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE) & PAGE_MASK);
         if (!pgt)
             continue;
         pgt->entries[index2] = 0;
@@ -584,7 +582,7 @@ int vm_free(size_t viraddr, uint32_t npages)
     if (viraddr <= KERNEL_SPACE)
         spinlock_unlock(&kslock);
     else
-        spinlock_irqsave_unlock(&task->pgd_lock);
+        spinlock_irqsave_unlock(&task->page_lock);
 
     return 0;
 }
@@ -593,8 +591,8 @@ int print_paging_tree(size_t viraddr)
 {
     task_t* task = per_core(current_task);
     uint32_t index1, index2;
-    page_dir_t* pgd = NULL;
-    page_table_t* pgt = NULL;
+    page_map_t* pgd = NULL;
+    page_map_t* pgt = NULL;
 
     if (BUILTIN_EXPECT(!viraddr, 0))
         return -EINVAL;
@@ -602,20 +600,20 @@ int print_paging_tree(size_t viraddr)
     index1 = viraddr >> 22;
     index2 = (viraddr >> 12) & 0x3FF;
 
-    spinlock_irqsave_lock(&task->pgd_lock);
+    spinlock_irqsave_lock(&task->page_lock);
 
     kprintf("Paging dump of address 0x%x\n", viraddr);
-    pgd = task->pgd;
+    pgd = task->page_map;
     kprintf("\tPage directory entry %u: ", index1);
     if (pgd) {
         kprintf("0x%0x\n", pgd->entries[index1]);
-        pgt = (page_table_t*) (pgd->entries[index1] & PAGE_MASK);
+        pgt = (page_map_t*) (pgd->entries[index1] & PAGE_MASK);
     } else
         kputs("invalid page directory\n");
 
     /* convert physical address to virtual */
     if (paging_enabled && pgt)
-        pgt = (page_table_t*) (KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE);
+        pgt = (page_map_t*) (KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE);
 
     kprintf("\tPage table entry %u: ", index2);
     if (pgt)
@@ -623,7 +621,7 @@ int print_paging_tree(size_t viraddr)
     else
         kputs("invalid page table\n");
 
-    spinlock_irqsave_unlock(&task->pgd_lock);
+    spinlock_irqsave_unlock(&task->page_lock);
 
     return 0;
 }
@@ -631,12 +629,12 @@ int print_paging_tree(size_t viraddr)
 static void pagefault_handler(struct state *s)
 {
     task_t* task = per_core(current_task);
-    page_dir_t* pgd = task->pgd;
-    page_table_t* pgt = NULL;
     size_t viraddr = read_cr2();
     size_t phyaddr;
 #ifdef CONFIG_ROCKCREEK
     uint32_t index1, index2;
+    page_map_t* pgd = task->page_map;
+    page_map_t* pgt = NULL;
 #endif
 
     if ((viraddr >= task->start_heap) && (viraddr <= task->end_heap) && (viraddr > KERNEL_SPACE)) {
@@ -661,7 +659,7 @@ static void pagefault_handler(struct state *s)
     index2 = (viraddr >> 12) & 0x3FF;
     if (!pgd || !(pgd->entries[index1] & PAGE_MASK))
         goto default_handler;
-    pgt = (page_table_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE) & PAGE_MASK);
+    pgt = (page_map_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE) & PAGE_MASK);
     if (!pgt || !(pgt->entries[index2]))
         goto default_handler;
     if (pgt->entries[index2] & PG_SVM_INIT) {
@@ -687,7 +685,7 @@ default_handler:
 int arch_paging_init(void)
 {
     uint32_t i, npages, index1, index2;
-    page_table_t* pgt;
+    page_map_t* pgt;
     size_t viraddr;
 
     // uninstall default handler and install our own
@@ -703,15 +701,15 @@ int arch_paging_init(void)
     index2 = (viraddr >> 12) & 0x3FF;
 
     // now, we create a self reference
-    per_core(current_task)->pgd->entries[index1] = (((size_t) pgt) & PAGE_MASK)|KERN_TABLE;
-    pgt->entries[index2] = ((size_t) pgt & 0xFFFFF000)|KERN_PAGE;
+    per_core(current_task)->page_map->entries[index1] = (((size_t) pgt) & PAGE_MASK)|KERN_TABLE;
+    pgt->entries[index2] = ((size_t) pgt & PAGE_MASK)|KERN_PAGE;
 
     // create the other PGTs for the kernel space
     for(i=0; i<KERNEL_SPACE/(1024*PAGE_SIZE)-1; i++) {
         size_t phyaddr = boot_pgt+i;
 
-        memset((void*) phyaddr, 0x00, sizeof(page_table_t));
-        per_core(current_task)->pgd->entries[i] = (phyaddr & PAGE_MASK)|KERN_TABLE;
+        memset((void*) phyaddr, 0x00, sizeof(page_map_t));
+        per_core(current_task)->page_map->entries[i] = (phyaddr & PAGE_MASK)|KERN_TABLE;
         pgt->entries[i] = (phyaddr & PAGE_MASK)|KERN_PAGE;
     }
 
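The "self reference" created above is the classic recursive-mapping trick: one directory slot points back at the table structures themselves, so the page table covering directory entry i is always visible at a fixed virtual address near the top of kernel space — which is exactly the expression (KERNEL_SPACE - 1024*PAGE_SIZE + index*PAGE_SIZE) used throughout this file. A hedged sketch of that address computation (the helper name and the 1 GiB KERNEL_SPACE value are illustrative assumptions):

    /* illustrative helper, not part of the commit */
    #define PAGE_SIZE     4096
    #define KERNEL_SPACE  (1UL << 30)   /* 1 GiB kernel space, per the file header */

    static inline void* pgt_virt_addr(unsigned long index)
    {
        /* page table for directory entry `index`, reached through the
         * self-referencing "page table container" at the top of kernel space */
        return (void*)(KERNEL_SPACE - 1024*PAGE_SIZE + index*PAGE_SIZE);
    }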
@@ -738,9 +736,7 @@ int arch_paging_init(void)
 #endif
 
 #ifdef CONFIG_MULTIBOOT
-    /*
-     * of course, mb_info has to map into the kernel space
-     */
+    // map mb_info into the kernel space
     if (mb_info)
         map_region((size_t) mb_info & PAGE_MASK, (size_t) mb_info & PAGE_MASK, 1, MAP_KERNEL_SPACE);
 
@@ -805,7 +801,7 @@ int arch_paging_init(void)
     kprintf("Map FPGA regsiters at 0x%x\n", viraddr);
 #endif
 
-    /* enable paging */
+    // enable paging
     write_cr3((uint32_t) &boot_pgd);
     i = read_cr0();
     i = i | (1 << 31);
@@ -31,21 +31,15 @@
 #include <asm/irq.h>
 #include <asm/multiboot.h>
 #include <asm/apic.h>
-#ifdef CONFIG_ROCKCREEK
-#include <asm/RCCE_lib.h>
-#include <asm/SCC_API.h>
-#include <asm/svm.h>
-#include <asm/icc.h>
-#endif
 
 /*
 * Virtual Memory Layout of the standard configuration
-* (1 GB kernel space)
-*
-* 0x00000000 - 0x000FFFFF: reserved for IO devices (16MB)
-* 0x00100000 - 0x0DEADFFF: Kernel (size depends on the configuration) (221MB)
-* 0x0DEAE000 - 0x3FFFFFFF: Kernel heap
+*
+* 0x000000000000 - 0x0000000FFFFF: reserved for IO devices (16MB)
+* 0x000000100000 - 0x00000DEADFFF: Kernel (size depends on the configuration) (221MB)
+* 0x00000DEAE000 - 0x00003FFFFFFF: Kernel heap
+* 0xFF8000000000 - 0xFFFFFFFFFFFF: Paging structures are mapped in this region (1GB)
 */
 
 /*
@@ -55,17 +49,22 @@
 extern const void kernel_start;
 extern const void kernel_end;
 
-// boot task's page directory and p:age directory lock
-extern page_dir_t boot_pgd; // TODO: initialization done in entry64.asm
+// boot task's page directory and page directory lock
+extern page_map_t boot_pml4;
 static spinlock_t kslock = SPINLOCK_INIT;
 static int paging_enabled = 0;
 
-page_dir_t* get_boot_pgd(void)
+/*static page_map_t boot_pml4 = {{[0 ... MAP_ENTRIES-1] = 0}};
+static page_map_t boot_pdpt = {{[0 ... MAP_ENTRIES-1] = 0}};
+static page_map_t boot_pgd = {{[0 ... MAP_ENTRIES-1] = 0}};
+static page_map_t boot_pgt = {{[0 ... MAP_ENTRIES-1] = 0}};*/
+
+page_map_t* get_boot_page_map(void)
 {
-    return &boot_pgd;
+    return &boot_pml4;
 }
 
-int create_pgd(task_t* task, int copy)
+int create_page_map(task_t* task, int copy)
 {
     // TODO: Currently, we support only kernel tasks
     // => all tasks are able to use the same pgd
@@ -73,18 +72,15 @@ int create_pgd(task_t* task, int copy)
     if (BUILTIN_EXPECT(!paging_enabled, 0))
         return -EINVAL;
 
-    task->pgd = get_boot_pgd();
+    task->page_map = get_boot_page_map();
 
     return 0;
 }
 
 /*
 * drops all page frames and the PGD of a user task
 */
-int drop_pgd(void)
+int drop_page_map(void)
 {
 #if 0
-    page_dir_t* pgd = per_core(current_task)->pgd;
+    page_map_t* pgd = per_core(current_task)->page_map;
     size_t phy_pgd = virt_to_phys((size_t) pgd);
     task_t* task = per_core(current_task);
     uint32_t i;
@@ -92,7 +88,7 @@ int drop_pgd(void)
     if (BUILTIN_EXPECT(pgd == &boot_pgd, 0))
         return -EINVAL;
 
-    spinlock_lock(&task->pgd_lock);
+    spinlock_lock(&task->page_lock);
 
     for(i=0; i<1024; i++) {
         if (pgd->entries[i] & PG_USER) {
@@ -104,9 +100,9 @@ int drop_pgd(void)
     // freeing the page directory
     put_page(phy_pgd);
 
-    task->pgd = NULL;
+    task->page_map = NULL;
 
-    spinlock_unlock(&task->pgd_lock);
+    spinlock_unlock(&task->page_lock);
 #endif
 
     return 0;
@@ -115,36 +111,36 @@ int drop_pgd(void)
 size_t virt_to_phys(size_t viraddr)
 {
     task_t* task = per_core(current_task);
-    uint16_t idx_pd4 = (viraddr >> 39) & 0x1FF;
-    uint16_t idx_dirp = (viraddr >> 30) & 0x1FF;
-    uint16_t idx_dir = (viraddr >> 21) & 0x1FF;
-    uint16_t idx_table = (viraddr >> 12) & 0x1FF;
-    page_table_t* pgt;
+    page_map_t* pdpt, * pgd , * pgt;
+    uint16_t index_pml4 = (viraddr >> 39) & 0x1FF;
+    uint16_t index_pdpt = (viraddr >> 30) & 0x1FF;
+    uint16_t index_pgd = (viraddr >> 21) & 0x1FF;
+    uint16_t index_pgt = (viraddr >> 12) & 0x1FF;
     size_t ret = 0;
 
     if (!paging_enabled)
         return viraddr;
 
-    if (BUILTIN_EXPECT(!task || !task->pgd, 0))
+    if (BUILTIN_EXPECT(!task || !task->page_map, 0))
         return 0;
 
-    spinlock_irqsave_lock(&task->pgd_lock);
+    spinlock_irqsave_lock(&task->page_lock);
 
     // TODO: Currently, we allocate pages only in kernel space.
     // => physical address of the page table is identical of the virtual address
-    pgt = (page_table_t*) (task->pgd->entries[idx_pd4] & PAGE_MASK);
-    if (!pgt)
-        goto out;
-
-    pgt = (page_table_t*) (pgt->entries[idx_dirp] & PAGE_MASK);
-    if (!pgt)
-        goto out;
-
-    pgt = (page_table_t*) (pgt->entries[idx_dir] & PAGE_MASK);
-    if (!pgt)
-        goto out;
-
-    ret = (size_t) (pgt->entries[idx_table] & PAGE_MASK);
+    pdpt = (page_map_t*) (task->page_map->entries[index_pml4] & PAGE_MASK);
+    if (!pdpt)
+        goto out;
+
+    pgd = (page_map_t*) (pdpt->entries[index_pdpt] & PAGE_MASK);
+    if (!pgd)
+        goto out;
+
+    pgt = (page_map_t*) (pgd->entries[index_pgd] & PAGE_MASK);
+    if (!pgt)
+        goto out;
+
+    ret = (size_t) (pgt->entries[index_pgt] & PAGE_MASK);
     if (!ret)
         goto out;
 
@@ -152,7 +148,7 @@ size_t virt_to_phys(size_t viraddr)
 out:
     //kprintf("vir %p to phy %p\n", viraddr, ret);
 
-    spinlock_irqsave_unlock(&task->pgd_lock);
+    spinlock_irqsave_unlock(&task->page_lock);
 
     return ret;
 }
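The rewritten 64-bit lookup performs the same step at every level, which is precisely what the unified page_map_t enables. A compact sketch of the four-level descent expressed as a loop (it assumes identity-mapped tables, as the TODO above notes; the walk function and its handling of the page offset are illustrative, not the commit's code):

    #include <stddef.h>

    #define PAGE_MASK 0xFFFFFFFFFFFFF000ULL

    typedef struct page_map { size_t entries[512]; } page_map_t;

    /* Walk PML4 -> PDPT -> PGD -> PGT; returns 0 if any level is absent.
     * Assumes table entries hold physical addresses that are also valid
     * virtual addresses (kernel identity mapping), as in the code above. */
    static size_t walk(page_map_t* pml4, size_t viraddr)
    {
        page_map_t* map = pml4;
        int shift;

        for (shift = 39; shift >= 12; shift -= 9) {
            size_t entry = map->entries[(viraddr >> shift) & 0x1FF];
            if (!(entry & PAGE_MASK))
                return 0;
            if (shift == 12)                  /* last level: physical frame */
                return (entry & PAGE_MASK) | (viraddr & ~PAGE_MASK);
            map = (page_map_t*)(entry & PAGE_MASK);
        }
        return 0;
    }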
@@ -160,10 +156,12 @@ out:
 size_t map_region(size_t viraddr, size_t phyaddr, uint32_t npages, uint32_t flags)
 {
     task_t* task = per_core(current_task);
-    page_table_t* pgt;
+    page_map_t* pdpt, * pgd, * pgt;
+    uint16_t index_pml4, index_pdpt;
+    uint16_t index_pgd, index_pgt;
     size_t i, ret;
 
-    if (BUILTIN_EXPECT(!task || !task->pgd, 0))
+    if (BUILTIN_EXPECT(!task || !task->page_map, 0))
         return 0;
 
     if (BUILTIN_EXPECT(!paging_enabled && (viraddr != phyaddr), 0))
@@ -172,7 +170,7 @@ size_t map_region(size_t viraddr, size_t phyaddr, uint32_t npages, uint32_t flag
     if (flags & MAP_KERNEL_SPACE)
         spinlock_lock(&kslock);
     else
-        spinlock_irqsave_lock(&task->pgd_lock);
+        spinlock_irqsave_lock(&task->page_lock);
 
     if (!viraddr) {
         viraddr = vm_alloc(npages, flags);
@@ -185,26 +183,26 @@ size_t map_region(size_t viraddr, size_t phyaddr, uint32_t npages, uint32_t flag
 
     ret = viraddr;
     for(i=0; i<npages; i++, viraddr+=PAGE_SIZE, phyaddr+=PAGE_SIZE) {
-        uint16_t idx_pd4 = (viraddr >> 39) & 0x1FF;
-        uint16_t idx_dirp = (viraddr >> 30) & 0x1FF;
-        uint16_t idx_dir = (viraddr >> 21) & 0x1FF;
-        uint16_t idx_table = (viraddr >> 12) & 0x1FF;
+        index_pml4 = (viraddr >> 39) & 0x1FF;
+        index_pdpt = (viraddr >> 30) & 0x1FF;
+        index_pgd = (viraddr >> 21) & 0x1FF;
+        index_pgt = (viraddr >> 12) & 0x1FF;
 
-        pgt = (page_table_t*) (task->pgd->entries[idx_pd4] & PAGE_MASK);
-        if (!pgt) {
-            kputs("map_region: out of memory\n");
-            ret = 0;
-            goto out;
-        }
-
-        pgt = (page_table_t*) (pgt->entries[idx_dirp] & PAGE_MASK);
+        pdpt = (page_map_t*) (task->page_map->entries[index_pml4] & PAGE_MASK);
         if (!pgt) {
             kputs("map_region: out of memory\n");
             ret = 0;
             goto out;
         }
 
-        pgt = (page_table_t*) (pgt->entries[idx_dir] & PAGE_MASK);
+        pgd = (page_map_t*) (pdpt->entries[index_pdpt] & PAGE_MASK);
+        if (!pgd) {
+            kputs("map_region: out of memory\n");
+            ret = 0;
+            goto out;
+        }
+
+        pgt = (page_map_t*) (pgd->entries[index_pgd] & PAGE_MASK);
         if (!pgt) {
             kputs("map_region: out of memory\n");
             ret = 0;
@@ -215,27 +213,27 @@ size_t map_region(size_t viraddr, size_t phyaddr, uint32_t npages, uint32_t flag
         // Currently, we allocate pages only in kernel space.
         // => physical address of the page table is identical of the virtual address
         //if (paging_enabled)
-        //    pgt = (page_table_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index*PAGE_SIZE) & PAGE_MASK);
+        //    pgt = (page_map_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index*PAGE_SIZE) & PAGE_MASK);
 
-        if (pgt->entries[idx_table] && !(flags & MAP_REMAP)) {
+        if (pgt->entries[index_pgt] && !(flags & MAP_REMAP)) {
             kprintf("0x%x is already mapped\n", viraddr);
             ret = 0;
             goto out;
         }
 
         if (flags & MAP_USER_SPACE)
-            pgt->entries[idx_table] = USER_PAGE|(phyaddr & PAGE_MASK);
+            pgt->entries[index_pgt] = USER_PAGE|(phyaddr & PAGE_MASK);
         else
-            pgt->entries[idx_table] = KERN_PAGE|(phyaddr & PAGE_MASK);
+            pgt->entries[index_pgt] = KERN_PAGE|(phyaddr & PAGE_MASK);
 
         if (flags & MAP_NO_CACHE)
-            pgt->entries[idx_table] |= PG_PCD;
+            pgt->entries[index_pgt] |= PG_PCD;
 
         if (flags & MAP_NO_ACCESS)
-            pgt->entries[idx_table] &= ~PG_PRESENT;
+            pgt->entries[index_pgt] &= ~PG_PRESENT;
 
         if (flags & MAP_WT)
-            pgt->entries[idx_table] |= PG_PWT;
+            pgt->entries[index_pgt] |= PG_PWT;
 
         if (flags & MAP_USER_SPACE)
             atomic_int32_inc(&task->user_usage);
@@ -247,7 +245,7 @@ out:
     if (flags & MAP_KERNEL_SPACE)
         spinlock_unlock(&kslock);
     else
-        spinlock_irqsave_unlock(&task->pgd_lock);
+        spinlock_irqsave_unlock(&task->page_lock);
 
     return ret;
 }
@@ -258,18 +256,18 @@ int change_page_permissions(size_t start, size_t end, uint32_t flags)
     uint32_t index1, index2, newflags;
     size_t viraddr = start & PAGE_MASK;
     size_t phyaddr;
-    page_table_t* pgt;
-    page_dir_t* pgd;
+    page_map_t* pgt;
+    page_map_t* pgd;
     task_t* task = per_core(current_task);
 
     if (BUILTIN_EXPECT(!paging_enabled, 0))
         return -EINVAL;
 
-    pgd = per_core(current_task)->pgd;
+    pgd = per_core(current_task)->page_map;
     if (BUILTIN_EXPECT(!pgd, 0))
         return -EINVAL;
 
-    spinlock_lock(&task->pgd_lock);
+    spinlock_lock(&task->page_lock);
 
     while (viraddr < end)
     {
@@ -277,7 +275,7 @@ int change_page_permissions(size_t start, size_t end, uint32_t flags)
         index2 = (viraddr >> 12) & 0x3FF;
 
         while ((viraddr < end) && (index2 < 1024)) {
-            pgt = (page_table_t*) (page_table_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE) & PAGE_MASK);
+            pgt = (page_map_t*) (page_map_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE) & PAGE_MASK);
             if (pgt && pgt->entries[index2]) {
                 phyaddr = pgt->entries[index2] & PAGE_MASK;
                 newflags = pgt->entries[index2] & 0xFFF; // get old flags
@@ -292,16 +290,8 @@ int change_page_permissions(size_t start, size_t end, uint32_t flags)
                 // update flags
                 if (!(flags & VMA_WRITE)) {
                     newflags &= ~PG_RW;
-#ifdef CONFIG_ROCKCREEK
-                    if (newflags & (PG_SVM_STRONG|PG_SVM_LAZYRELEASE))
-                        newflags &= ~PG_MPE;
-#endif
                 } else {
                     newflags |= PG_RW;
-#ifdef CONFIG_ROCKCREEK
-                    if (newflags & (PG_SVM_STRONG|PG_SVM_LAZYRELEASE))
-                        newflags |= PG_MPE;
-#endif
                 }
 
                 pgt->entries[index2] = (newflags & 0xFFF) | (phyaddr & PAGE_MASK);
@@ -314,7 +304,7 @@ int change_page_permissions(size_t start, size_t end, uint32_t flags)
         }
     }
 
-    spinlock_unlock(&task->pgd_lock);
+    spinlock_unlock(&task->page_lock);
 #endif
 
     return -EINVAL;
@@ -330,9 +320,11 @@ size_t vm_alloc(uint32_t npages, uint32_t flags)
     task_t* task = per_core(current_task);
     size_t viraddr, i, j, ret = 0;
     size_t start, end;
-    page_table_t* pgt;
+    page_map_t* pdpt, * pgd, * pgt;
+    uint16_t index_pml4, index_pdpt;
+    uint16_t index_pgd, index_pgt;
 
-    if (BUILTIN_EXPECT(!task || !task->pgd || !paging_enabled, 0))
+    if (BUILTIN_EXPECT(!task || !task->page_map || !paging_enabled, 0))
         return 0;
 
     if (flags & MAP_KERNEL_SPACE) {
@@ -349,40 +341,40 @@ size_t vm_alloc(uint32_t npages, uint32_t flags)
     if (flags & MAP_KERNEL_SPACE)
         spinlock_lock(&kslock);
     else
-        spinlock_irqsave_lock(&task->pgd_lock);
+        spinlock_irqsave_lock(&task->page_lock);
 
     viraddr = i = start;
     j = 0;
     do {
-        uint16_t idx_pd4 = (viraddr >> 39) & 0x1FF;
-        uint16_t idx_dirp = (viraddr >> 30) & 0x1FF;
-        uint16_t idx_dir = (viraddr >> 21) & 0x1FF;
-        uint16_t idx_table = (viraddr >> 12) & 0x1FF;
+        index_pml4 = (viraddr >> 39) & 0x1FF;
+        index_pdpt = (viraddr >> 30) & 0x1FF;
+        index_pgd = (viraddr >> 21) & 0x1FF;
+        index_pgt = (viraddr >> 12) & 0x1FF;
 
         // Currently, we allocate pages only in kernel space.
        // => physical address of the page table is identical of the virtual address
-        pgt = (page_table_t*) (task->pgd->entries[idx_pd4] & PAGE_MASK);
-        if (!pgt) {
-            i += (size_t)PGT_ENTRIES*PGT_ENTRIES*PGT_ENTRIES*PAGE_SIZE;
-            j += PGT_ENTRIES*PGT_ENTRIES*PGT_ENTRIES;
+        pdpt = (page_map_t*) (task->page_map->entries[index_pml4] & PAGE_MASK);
+        if (!pdpt) {
+            i += (size_t)MAP_ENTRIES*MAP_ENTRIES*MAP_ENTRIES*PAGE_SIZE;
+            j += MAP_ENTRIES*MAP_ENTRIES*MAP_ENTRIES;
            continue;
         }
 
-        pgt = (page_table_t*) (pgt->entries[idx_dirp] & PAGE_MASK);
-        if (!pgt) {
-            i += PGT_ENTRIES*PGT_ENTRIES*PAGE_SIZE;
-            j += PGT_ENTRIES*PGT_ENTRIES;
+        pgd = (page_map_t*) (pdpt->entries[index_pdpt] & PAGE_MASK);
+        if (!pgd) {
+            i += MAP_ENTRIES*MAP_ENTRIES*PAGE_SIZE;
+            j += MAP_ENTRIES*MAP_ENTRIES;
             continue;
         }
 
-        pgt = (page_table_t*) (pgt->entries[idx_dir] & PAGE_MASK);
+        pgt = (page_map_t*) (pgd->entries[index_pgd] & PAGE_MASK);
         if (!pgt) {
-            i += PGT_ENTRIES*PAGE_SIZE;
-            j += PGT_ENTRIES;
+            i += MAP_ENTRIES*PAGE_SIZE;
+            j += MAP_ENTRIES;
             continue;
         }
 
-        if (!(pgt->entries[idx_table])) {
+        if (!(pgt->entries[index_pgt])) {
             i += PAGE_SIZE;
             j++;
         } else {
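The skip arithmetic in the rewritten vm_alloc is worth spelling out: an empty PML4 entry means the whole MAP_ENTRIES^3-page region beneath it is unmapped, so the scan leaps over it in one step, while empty PDPT and PGD entries allow MAP_ENTRIES^2- and MAP_ENTRIES-page jumps respectively. A throwaway check of the resulting step sizes (illustrative program, not kernel code):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        const uint64_t MAP_ENTRIES = 512, PAGE_SIZE = 4096;

        /* bytes of virtual address space skipped per absent entry */
        printf("PML4 entry: %llu GiB\n", (unsigned long long)
               (MAP_ENTRIES*MAP_ENTRIES*MAP_ENTRIES*PAGE_SIZE >> 30)); /* 512 */
        printf("PDPT entry: %llu GiB\n", (unsigned long long)
               (MAP_ENTRIES*MAP_ENTRIES*PAGE_SIZE >> 30));             /* 1   */
        printf("PGD  entry: %llu MiB\n", (unsigned long long)
               (MAP_ENTRIES*PAGE_SIZE >> 20));                         /* 2   */
        return 0;
    }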
@@ -399,7 +391,7 @@ size_t vm_alloc(uint32_t npages, uint32_t flags)
     if (flags & MAP_KERNEL_SPACE)
         spinlock_unlock(&kslock);
     else
-        spinlock_irqsave_unlock(&task->pgd_lock);
+        spinlock_irqsave_unlock(&task->page_lock);
 
     return ret;
 }
@@ -407,56 +399,56 @@ size_t vm_alloc(uint32_t npages, uint32_t flags)
 int unmap_region(size_t viraddr, uint32_t npages)
 {
     task_t* task = per_core(current_task);
-    page_table_t* pgt;
+    page_map_t* pdpt, * pgd, * pgt;
     size_t i;
-    uint16_t idx_pd4, idx_dirp;
-    uint16_t idx_dir, idx_table;
+    uint16_t index_pml4, index_pdpt;
+    uint16_t index_pgd, index_pgt;
 
-    if (BUILTIN_EXPECT(!task || !task->pgd || !paging_enabled, 0))
+    if (BUILTIN_EXPECT(!task || !task->page_map || !paging_enabled, 0))
         return -EINVAL;
 
     if (viraddr <= KERNEL_SPACE)
         spinlock_lock(&kslock);
     else
-        spinlock_irqsave_lock(&task->pgd_lock);
+        spinlock_irqsave_lock(&task->page_lock);
 
     i = 0;
     while(i<npages)
     {
-        idx_pd4 = (viraddr >> 39) & 0x1FF;
-        idx_dirp = (viraddr >> 30) & 0x1FF;
-        idx_dir = (viraddr >> 21) & 0x1FF;
-        idx_table = (viraddr >> 12) & 0x1FF;
+        index_pml4 = (viraddr >> 39) & 0x1FF;
+        index_pdpt = (viraddr >> 30) & 0x1FF;
+        index_pgd = (viraddr >> 21) & 0x1FF;
+        index_pgt = (viraddr >> 12) & 0x1FF;
 
         // Currently, we allocate pages only in kernel space.
         // => physical address of the page table is identical of the virtual address
-        pgt = (page_table_t*) (task->pgd->entries[idx_pd4] & PAGE_MASK);
-        if (!pgt) {
-            viraddr += (size_t) PGT_ENTRIES*PGT_ENTRIES*PGT_ENTRIES*PAGE_SIZE;
-            i += PGT_ENTRIES*PGT_ENTRIES*PGT_ENTRIES;
+        pdpt = (page_map_t*) (task->page_map->entries[index_pml4] & PAGE_MASK);
+        if (!pdpt) {
+            viraddr += (size_t) MAP_ENTRIES*MAP_ENTRIES*MAP_ENTRIES*PAGE_SIZE;
+            i += MAP_ENTRIES*MAP_ENTRIES*MAP_ENTRIES;
            continue;
         }
 
-        pgt = (page_table_t*) (pgt->entries[idx_dirp] & PAGE_MASK);
-        if (!pgt) {
-            viraddr += PGT_ENTRIES*PGT_ENTRIES*PAGE_SIZE;
-            i += PGT_ENTRIES*PGT_ENTRIES;
+        pgd = (page_map_t*) (pdpt->entries[index_pdpt] & PAGE_MASK);
+        if (!pgd) {
+            viraddr += MAP_ENTRIES*MAP_ENTRIES*PAGE_SIZE;
+            i += MAP_ENTRIES*MAP_ENTRIES;
             continue;
         }
 
-        pgt = (page_table_t*) (pgt->entries[idx_dir] & PAGE_MASK);
+        pgt = (page_map_t*) (pgd->entries[index_pgd] & PAGE_MASK);
         if (!pgt) {
-            viraddr += PGT_ENTRIES*PAGE_SIZE;
-            i += PGT_ENTRIES;
+            viraddr += MAP_ENTRIES*PAGE_SIZE;
+            i += MAP_ENTRIES;
             continue;
         }
 
-        if (pgt->entries[idx_table])
-            pgt->entries[idx_table] &= ~PG_PRESENT;
+        if (pgt->entries[index_pgt])
+            pgt->entries[index_pgt] &= ~PG_PRESENT;
 
         viraddr += PAGE_SIZE;
         i++;
 
         if (viraddr > KERNEL_SPACE)
             atomic_int32_dec(&task->user_usage);
 
@@ -466,7 +458,7 @@ int unmap_region(size_t viraddr, uint32_t npages)
     if (viraddr <= KERNEL_SPACE)
         spinlock_unlock(&kslock);
     else
-        spinlock_irqsave_unlock(&task->pgd_lock);
+        spinlock_irqsave_unlock(&task->page_lock);
 
     return 0;
 }
@@ -474,52 +466,52 @@ int unmap_region(size_t viraddr, uint32_t npages)
 int vm_free(size_t viraddr, uint32_t npages)
 {
     task_t* task = per_core(current_task);
-    page_table_t* pgt;
+    page_map_t* pdpt, * pgd, * pgt;
     size_t i;
-    uint16_t idx_pd4, idx_dirp;
-    uint16_t idx_dir, idx_table;
+    uint16_t index_pml4, index_pdpt;
+    uint16_t index_pgd, index_pgt;
 
-    if (BUILTIN_EXPECT(!task || !task->pgd || !paging_enabled, 0))
+    if (BUILTIN_EXPECT(!task || !task->page_map || !paging_enabled, 0))
         return -EINVAL;
 
     if (viraddr <= KERNEL_SPACE)
         spinlock_lock(&kslock);
     else
-        spinlock_irqsave_lock(&task->pgd_lock);
+        spinlock_irqsave_lock(&task->page_lock);
 
     i = 0;
     while(i<npages)
     {
-        idx_pd4 = (viraddr >> 39) & 0x1FF;
-        idx_dirp = (viraddr >> 30) & 0x1FF;
-        idx_dir = (viraddr >> 21) & 0x1FF;
-        idx_table = (viraddr >> 12) & 0x1FF;
+        index_pml4 = (viraddr >> 39) & 0x1FF;
+        index_pdpt = (viraddr >> 30) & 0x1FF;
+        index_pgd = (viraddr >> 21) & 0x1FF;
+        index_pgt = (viraddr >> 12) & 0x1FF;
 
         // Currently, we allocate pages only in kernel space.
        // => physical address of the page table is identical of the virtual address
-        pgt = (page_table_t*) (task->pgd->entries[idx_pd4] & PAGE_MASK);
-        if (!pgt) {
-            viraddr += (size_t) PGT_ENTRIES*PGT_ENTRIES*PGT_ENTRIES*PAGE_SIZE;
-            i += PGT_ENTRIES*PGT_ENTRIES*PGT_ENTRIES;
+        pdpt = (page_map_t*) (task->page_map->entries[index_pml4] & PAGE_MASK);
+        if (!pdpt) {
+            viraddr += (size_t) MAP_ENTRIES*MAP_ENTRIES*MAP_ENTRIES*PAGE_SIZE;
+            i += MAP_ENTRIES*MAP_ENTRIES*MAP_ENTRIES;
            continue;
         }
 
-        pgt = (page_table_t*) (pgt->entries[idx_dirp] & PAGE_MASK);
-        if (!pgt) {
-            viraddr += PGT_ENTRIES*PGT_ENTRIES*PAGE_SIZE;
-            i += PGT_ENTRIES*PGT_ENTRIES;
+        pgd = (page_map_t*) (pdpt->entries[index_pdpt] & PAGE_MASK);
+        if (!pgd) {
+            viraddr += MAP_ENTRIES*MAP_ENTRIES*PAGE_SIZE;
+            i += MAP_ENTRIES*MAP_ENTRIES;
             continue;
         }
 
-        pgt = (page_table_t*) (pgt->entries[idx_dir] & PAGE_MASK);
+        pgt = (page_map_t*) (pgd->entries[index_pgd] & PAGE_MASK);
         if (!pgt) {
-            viraddr += PGT_ENTRIES*PAGE_SIZE;
-            i += PGT_ENTRIES;
+            viraddr += MAP_ENTRIES*PAGE_SIZE;
+            i += MAP_ENTRIES;
             continue;
         }
 
-        if (pgt->entries[idx_table])
-            pgt->entries[idx_table] = 0;
+        if (pgt->entries[index_pgt])
+            pgt->entries[index_pgt] = 0;
 
         viraddr += PAGE_SIZE;
         i++;
@@ -530,7 +522,7 @@ int vm_free(size_t viraddr, uint32_t npages)
     if (viraddr <= KERNEL_SPACE)
         spinlock_unlock(&kslock);
     else
-        spinlock_irqsave_unlock(&task->pgd_lock);
+        spinlock_irqsave_unlock(&task->page_lock);
 
     return 0;
 }
@@ -538,8 +530,8 @@ int vm_free(size_t viraddr, uint32_t npages)
 static void pagefault_handler(struct state *s)
 {
     task_t* task = per_core(current_task);
-    //page_dir_t* pgd = task->pgd;
-    //page_table_t* pgt = NULL;
+    //page_map_t* pgd = task->page_map;
+    //page_map_t* pgt = NULL;
     size_t viraddr = read_cr2();
     //size_t phyaddr;
 
@@ -62,7 +62,7 @@ extern "C" {
 #define TASK_L2 (1 << 3)
 
 typedef int (*entry_point_t)(void*);
-struct page_dir;
+typedef struct page_map page_map_t;
 
 /** @brief The task_t structure */
 typedef struct task {
@@ -88,10 +88,10 @@ typedef struct task {
     uint32_t last_core;
     /// usage in number of pages
     atomic_int32_t user_usage;
-    /// avoids concurrent access to the page directory
-    spinlock_irqsave_t pgd_lock;
-    /// pointer to the page directory
-    struct page_dir* pgd;
+    /// avoids concurrent access to the page map structures
+    spinlock_irqsave_t page_lock;
+    /// pointer to page directory (32bit) or page map level 4 (64bit) table respectively
+    page_map_t* page_map;
     /// lock for the VMA_list
     spinlock_t vma_lock;
     /// list of VMAs
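With these two fields renamed, code that touches a task's address space locks page_lock and dereferences page_map without caring whether it is a PGD or a PML4. A hedged sketch of the intended access pattern (the function below is illustrative and presumes the kernel's headers; the spinlock helpers are the ones used throughout this diff):

    /* illustrative only: how the renamed fields are meant to be used */
    void example_touch_address_space(task_t* task)
    {
        spinlock_irqsave_lock(&task->page_lock);

        /* page_map is the PGD on 32 bit and the PML4 on 64 bit;
         * callers no longer need to know which. */
        if (task->page_map) {
            /* ... inspect or modify task->page_map->entries[] ... */
        }

        spinlock_irqsave_unlock(&task->page_lock);
    }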
@@ -104,7 +104,7 @@ int multitasking_init(void) {
 
     mailbox_wait_msg_init(&task_table[0].inbox);
     memset(task_table[0].outbox, 0x00, sizeof(mailbox_wait_msg_t*)*MAX_TASKS);
-    task_table[0].pgd = get_boot_pgd();
+    task_table[0].page_map = get_boot_page_map();
     task_table[0].flags = TASK_DEFAULT_FLAGS;
     task_table[0].prio = IDLE_PRIO;
     task_table[0].stack = (void*) &boot_stack;
@@ -128,7 +128,7 @@ size_t get_idle_task(uint32_t id)
     atomic_int32_set(&task_table[id].user_usage, 0);
     mailbox_wait_msg_init(&task_table[id].inbox);
     memset(task_table[id].outbox, 0x00, sizeof(mailbox_wait_msg_t*)*MAX_TASKS);
-    task_table[id].pgd = get_boot_pgd();
+    task_table[id].page_map = get_boot_page_map();
     current_task[id].var = task_table+id;
     runqueues[id].idle = task_table+id;
 
@@ -242,7 +242,7 @@ static void NORETURN do_exit(int arg) {
 
     spinlock_unlock(&curr_task->vma_lock);
 
-    drop_pgd(); // delete page directory and its page tables
+    drop_page_map(); // delete page directory and its page tables
 
 #if 0
     if (atomic_int32_read(&curr_task->user_usage))
@@ -327,7 +327,7 @@ static int create_task(tid_t* id, entry_point_t ep, void* arg, uint8_t prio, uin
     if (task_table[i].status == TASK_INVALID) {
         atomic_int32_set(&task_table[i].user_usage, 0);
 
-        ret = create_pgd(task_table+i, 0);
+        ret = create_page_map(task_table+i, 0);
         if (ret < 0) {
             ret = -ENOMEM;
             goto create_task_out;
@@ -400,7 +400,7 @@ int sys_fork(void)
     if (task_table[i].status == TASK_INVALID) {
         atomic_int32_set(&task_table[i].user_usage, 0);
 
-        ret = create_pgd(task_table+i, 1);
+        ret = create_page_map(task_table+i, 1);
         if (ret < 0) {
             ret = -ENOMEM;
             goto create_task_out;
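Taken together, the task-side changes preserve the old lifecycle under the new names. An illustrative sketch of that sequence (error handling trimmed; example_spawn_and_exit is a made-up wrapper and presumes the kernel's headers, while create_page_map and drop_page_map are the functions renamed in this commit):

    /* lifecycle sketch using the renamed API from this commit */
    int example_spawn_and_exit(task_t* task)
    {
        int ret = create_page_map(task, 0);  /* was create_pgd(); 0 = don't copy */
        if (ret < 0)
            return -ENOMEM;

        /* ... task runs; on a context switch the scheduler loads the map via
         * write_cr3(virt_to_phys((size_t)task->page_map)); ... */

        return drop_page_map();              /* was drop_pgd(); frees the tables */
    }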