refactored paging code to be more universal and suitable for 64-bit paging
This commit is contained in:
parent
e290d41149
commit
9b47b3ef45
7 changed files with 266 additions and 289 deletions
@@ -84,33 +84,22 @@
 /// This is a whole set of flags (PRESENT,RW,USER) for userspace pages
 #define USER_PAGE (PG_PRESENT|PG_RW|PG_USER)
 
-#if __SIZEOF_POINTER__ == 4
-#define PGT_ENTRIES 1024
-#elif __SIZEOF_POINTER__ == 8
-#define PGT_ENTRIES 512
+#ifdef CONFIG_X86_32
+/// On a 32-bit system, each page map structure consists of 1024 entries (= 2^10)
+#define MAP_ENTRIES 1024
+#elif defined(CONFIG_X86_64)
+/// On a 64-bit system, each page map structure consists of 512 entries (= 2^9)
+#define MAP_ENTRIES 512
 #endif
 
-/** @brief Page table structure
+/** @brief General page map structure
  *
- * This structure keeps page table entries.\n
- * On a 32bit system, a page table consists normally of 1024 entries.
+ * This page map structure is a general type for all indirecton levels.\n
+ * As all page map levels containing the same amount of entries.
  */
-typedef struct page_table
-{
-	/// Page table entries are unsigned 32bit integers.
-	size_t entries[PGT_ENTRIES];
-} page_table_t __attribute__ ((aligned (4096)));
+typedef struct page_map {
+	size_t entries[MAP_ENTRIES];
+} __attribute__ ((aligned (4096))) page_map_t;
 
-/** @brief Page directory structure
- *
- * This structure keeps page directory entries.\
- * On a 32bit system, a page directory consists normally of 1024 entries.
- */
-typedef struct page_dir
-{
-	/// Page dir entries are unsigned 32bit integers.
-	size_t entries[PGT_ENTRIES];
-} page_dir_t __attribute__ ((aligned (4096)));
 
 /** @brief Converts a virtual address to a physical
  *
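
A quick aside on why one generic type suffices (a sketch, not part of the commit; the pointer-width test stands in for the CONFIG_X86_32/CONFIG_X86_64 switch): with MAP_ENTRIES chosen per pointer size, every level of the hierarchy occupies exactly one page, so the same struct can serve as PML4, PDPT, page directory, or page table.

	#include <stdint.h>
	#include <stddef.h>

	#define PAGE_SIZE 4096
	#if UINTPTR_MAX == 0xFFFFFFFFu
	#define MAP_ENTRIES 1024        /* 2^10 four-byte entries */
	#else
	#define MAP_ENTRIES 512         /* 2^9 eight-byte entries */
	#endif

	typedef struct page_map {
		size_t entries[MAP_ENTRIES];
	} __attribute__ ((aligned (4096))) page_map_t;

	_Static_assert(sizeof(page_map_t) == PAGE_SIZE,
	               "each page map level occupies exactly one page");
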
@@ -192,7 +181,7 @@ int arch_paging_init(void);
  *
  * @return Returns the address of the boot task's page dir array.
  */
-page_dir_t* get_boot_pgd(void);
+page_map_t* get_boot_page_map(void);
 
 /** @brief Setup a new page directory for a new user-level task
  *
@@ -203,18 +192,18 @@ page_dir_t* get_boot_pgd(void);
  * - counter of allocated page tables
  * - -ENOMEM (-12) on failure
  */
-int create_pgd(task_t* task, int copy);
+int create_page_map(task_t* task, int copy);
 
-/** @brief Delete page directory and its page tables
+/** @brief Delete all page map structures of the current task
  *
- * Puts page tables and page directory back to buffer and
- * sets the task's page directory pointer to NULL
+ * Puts PML4, PDPT, PGD, PGT tables back to buffer and
+ * sets the task's page map pointer to NULL
  *
  * @return
  * - 0 on success
  * - -EINVAL (-22) on failure (in case PGD is still the boot-pgd).
  */
-int drop_pgd(void);
+int drop_page_map(void);
 
 /** @brief Change the page permission in the page tables of the current task
  *
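
The renamed creation/teardown interface pairs up as follows — a hypothetical calling sequence, not taken from the commit (kputs and the errno values are the ones this tree already uses):

	page_map_t* boot = get_boot_page_map();  /* root map of the boot task */

	if (create_page_map(task, 1) < 0)        /* copy == 1: clone the parent's user pages */
		kputs("create_page_map failed: -ENOMEM\n");

	/* ... task runs ... */

	if (drop_page_map() == -EINVAL)          /* refuses to drop the boot page map */
		kputs("task still runs on the boot page map\n");
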
@@ -86,10 +86,10 @@ startup_stack:
 
 SECTION .data
 ; create default page tables for the 64bit kernel
-global boot_pgd ; aka PML4
+global boot_pml4
 ALIGN 4096 ; of course, the page tables have to be page aligned
 NOPTS equ 512
-boot_pgd times 512 DQ 0
+boot_pml4 times 512 DQ 0
 boot_pdpt times 512 DQ 0
 boot_pd times 512 DQ 0
 boot_pt times (NOPTS*512) DQ 0
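
For orientation: the assembly further down chains these tables entry by entry ("make PML4T[0] point to the PDPT and so on"). A C-side sketch of the resulting coverage, derived from the reserved sizes above (an assumption, not code from the commit):

	#define NOPTS     512
	#define PAGE_SIZE 4096
	/* NOPTS page tables, 512 entries each, 4 KiB per entry: */
	_Static_assert((unsigned long long)NOPTS * 512 * PAGE_SIZE == (1ULL << 30),
	               "the boot page tables can map exactly 1 GiB");
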
@@ -113,7 +113,7 @@ smp_entry:
 	mov cr4, eax
 
 	; initialize page table
-	mov edi, boot_pgd
+	mov edi, boot_pml4
 	mov cr3, edi
 
 	; we need to enable PAE modus
@@ -211,7 +211,7 @@ stublet:
 	jz Linvalid ; They aren't, there is no long mode.
 
 	; initialize page table
-	mov edi, boot_pgd
+	mov edi, boot_pml4
 	mov cr3, edi
 
 	; So lets make PML4T[0] point to the PDPT and so on:
@@ -50,7 +50,7 @@ size_t* get_current_stack(void)
 #endif
 
 	// use new page table
-	write_cr3(virt_to_phys((size_t)curr_task->pgd));
+	write_cr3(virt_to_phys((size_t)curr_task->page_map));
 
 	return curr_task->last_stack_pointer;
 }
 
@@ -46,7 +46,7 @@
  * 0x00100000 - 0x0DEADFFF: Kernel (size depends on the configuration) (221MB)
  * 0x0DEAE000 - 0x3FFFEFFF: Kernel heap (801MB)
  * 0x3FFFF000 - 0x3FFFFFFF: Page Tables are mapped in this region (4KB)
- * (The last 256 entries belongs to kernel space)
+ * (The first 256 entries belongs to kernel space)
  */
 
 /*
@@ -57,13 +57,14 @@ extern const void kernel_start;
 extern const void kernel_end;
 
 // boot task's page directory and page directory lock
-static page_dir_t boot_pgd = {{[0 ... PGT_ENTRIES-1] = 0}};
-static page_table_t pgt_container = {{[0 ... PGT_ENTRIES-1] = 0}};
-static page_table_t boot_pgt[KERNEL_SPACE/(1024*PAGE_SIZE)];
+static page_map_t boot_pgd = {{[0 ... MAP_ENTRIES-1] = 0}};
+static page_map_t boot_pgt[KERNEL_SPACE/(1024*PAGE_SIZE)];
+static page_map_t pgt_container = {{[0 ... MAP_ENTRIES-1] = 0}};
 
 static spinlock_t kslock = SPINLOCK_INIT;
 static int paging_enabled = 0;
 
-page_dir_t* get_boot_pgd(void)
+page_map_t* get_boot_page_map(void)
 {
 	return &boot_pgd;
 }
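
Worked numbers behind the boot_pgt dimension above (a sketch; the constants come from this file's layout comment):

	/* One 32-bit page table maps 1024 entries * 4 KiB = 4 MiB, so covering
	 * the 1 GiB kernel space takes
	 *     KERNEL_SPACE / (1024*PAGE_SIZE) = 0x40000000 / 0x400000 = 256
	 * page tables - matching the "first 256 entries" remark above. */
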
@@ -74,23 +75,23 @@ page_dir_t* get_boot_pgd(void)
  * No PGD locking is needed because only create_pgd use this function and holds already the
  * PGD lock.
  */
-inline static size_t copy_page_table(task_t* task, uint32_t pgd_index, page_table_t* pgt, int* counter)
+inline static size_t copy_page_table(task_t* task, uint32_t pgd_index, page_map_t* pgt, int* counter)
 {
 	uint32_t i;
-	page_table_t* new_pgt;
+	page_map_t* new_pgt;
 	size_t phyaddr;
 
 	if (BUILTIN_EXPECT(!pgt, 0))
 		return 0;
 
-	new_pgt = kmalloc(sizeof(page_table_t));
+	new_pgt = kmalloc(sizeof(page_map_t));
 	if (!new_pgt)
 		return 0;
-	memset(new_pgt, 0x00, sizeof(page_table_t));
+	memset(new_pgt, 0x00, sizeof(page_map_t));
 	if (counter)
 		(*counter)++;
 
-	for(i=0; i<PGT_ENTRIES; i++) {
+	for(i=0; i<MAP_ENTRIES; i++) {
 		if (pgt->entries[i] & PAGE_MASK) {
 			if (!(pgt->entries[i] & PG_USER)) {
 				// Kernel page => copy only page entries
@@ -117,11 +118,11 @@ inline static size_t copy_page_table(task_t* task, uint32_t pgd_index, page_tabl
 	return phyaddr;
 }
 
-int create_pgd(task_t* task, int copy)
+int create_page_map(task_t* task, int copy)
 {
-	page_dir_t* pgd;
-	page_table_t* pgt;
-	page_table_t* pgt_container;
+	page_map_t* pgd;
+	page_map_t* pgt;
+	page_map_t* pgt_container;
 	uint32_t i;
 	uint32_t index1, index2;
 	size_t viraddr, phyaddr;
@@ -133,25 +134,25 @@ int create_pgd(task_t* task, int copy)
 
 	// we already know the virtual address of the "page table container"
 	// (see file header)
-	pgt_container = (page_table_t*) ((KERNEL_SPACE - PAGE_SIZE) & PAGE_MASK);
+	pgt_container = (page_map_t*) ((KERNEL_SPACE - PAGE_SIZE) & PAGE_MASK);
 
 	// create new page directory for the new task
-	pgd = kmalloc(sizeof(page_dir_t));
+	pgd = kmalloc(sizeof(page_map_t));
 	if (!pgd)
 		return -ENOMEM;
-	memset(pgd, 0x00, sizeof(page_dir_t));
+	memset(pgd, 0x00, sizeof(page_map_t));
 
 	// create a new "page table container" for the new task
-	pgt = kmalloc(sizeof(page_table_t));
+	pgt = kmalloc(sizeof(page_map_t));
 	if (!pgt) {
-		kfree(pgd, sizeof(page_dir_t));
+		kfree(pgd, sizeof(page_map_t));
 		return -ENOMEM;
 	}
-	memset(pgt, 0x00, sizeof(page_table_t));
+	memset(pgt, 0x00, sizeof(page_map_t));
 
 	spinlock_lock(&kslock);
 
-	for(i=0; i<PGT_ENTRIES; i++) {
+	for(i=0; i<MAP_ENTRIES; i++) {
 		pgd->entries[i] = boot_pgd.entries[i];
 		// only kernel entries will be copied
 		if (pgd->entries[i] && !(pgd->entries[i] & PG_USER))
@@ -169,36 +170,33 @@ int create_pgd(task_t* task, int copy)
 	pgd->entries[index1] = ((size_t) virt_to_phys((size_t) pgt) & PAGE_MASK)|KERN_TABLE;
 	pgt->entries[index2] = ((size_t) virt_to_phys((size_t) pgt) & PAGE_MASK)|KERN_PAGE;
 
-	task->pgd = pgd;
+	task->page_map = pgd;
 
 	if (copy) {
-		spinlock_irqsave_lock(&curr_task->pgd_lock);
+		spinlock_irqsave_lock(&curr_task->page_lock);
 
 		for (i=KERNEL_SPACE/(1024*PAGE_SIZE); i<1024; i++) {
-			if (!(curr_task->pgd->entries[i]))
+			if (!(curr_task->page_map->entries[i]))
 				continue;
-			if (!(curr_task->pgd->entries[i] & PG_USER))
+			if (!(curr_task->page_map->entries[i] & PG_USER))
 				continue;
 
-			phyaddr = copy_page_table(task, i, (page_table_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + i*PAGE_SIZE) & PAGE_MASK), &counter);
+			phyaddr = copy_page_table(task, i, (page_map_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + i*PAGE_SIZE) & PAGE_MASK), &counter);
 			if (phyaddr) {
-				pgd->entries[i] = (phyaddr & PAGE_MASK) | (curr_task->pgd->entries[i] & 0xFFF);
+				pgd->entries[i] = (phyaddr & PAGE_MASK) | (curr_task->page_map->entries[i] & 0xFFF);
 				pgt->entries[i] = (phyaddr & PAGE_MASK) | KERN_PAGE;
 			}
 		}
 
-		spinlock_irqsave_unlock(&curr_task->pgd_lock);
+		spinlock_irqsave_unlock(&curr_task->page_lock);
 	}
 
 	return counter;
 }
 
-/*
- * drops all page frames and the PGD of a user task
- */
-int drop_pgd(void)
+int drop_page_map(void)
 {
-	page_dir_t* pgd = per_core(current_task)->pgd;
+	page_map_t* pgd = per_core(current_task)->page_map;
 	size_t phy_pgd = virt_to_phys((size_t) pgd);
 	task_t* task = per_core(current_task);
 	uint32_t i;
@@ -206,9 +204,9 @@ int drop_pgd(void)
 	if (BUILTIN_EXPECT(pgd == &boot_pgd, 0))
 		return -EINVAL;
 
-	spinlock_irqsave_lock(&task->pgd_lock);
+	spinlock_irqsave_lock(&task->page_lock);
 
-	for(i=0; i<PGT_ENTRIES; i++) {
+	for(i=0; i<MAP_ENTRIES; i++) {
 		if (pgd->entries[i] & PG_USER) {
 			put_page(pgd->entries[i] & PAGE_MASK);
 			pgd->entries[i] = 0;
@@ -218,9 +216,9 @@ int drop_pgd(void)
 	// freeing the page directory
 	put_page(phy_pgd);
 
-	task->pgd = NULL;
+	task->page_map = NULL;
 
-	spinlock_irqsave_unlock(&task->pgd_lock);
+	spinlock_irqsave_unlock(&task->page_lock);
 
 	return 0;
 }
@@ -229,24 +227,24 @@ size_t virt_to_phys(size_t viraddr)
 {
 	task_t* task = per_core(current_task);
 	uint32_t index1, index2;
-	page_table_t* pgt;
+	page_map_t* pgt;
 	size_t ret = 0;
 
 	if (!paging_enabled)
 		return viraddr;
 
-	if (BUILTIN_EXPECT(!task || !task->pgd, 0))
+	if (BUILTIN_EXPECT(!task || !task->page_map, 0))
 		return 0;
 
-	spinlock_irqsave_lock(&task->pgd_lock);
+	spinlock_irqsave_lock(&task->page_lock);
 
 	index1 = viraddr >> 22;
 	index2 = (viraddr >> 12) & 0x3FF;
 
-	if (!(task->pgd->entries[index1] & PAGE_MASK))
+	if (!(task->page_map->entries[index1] & PAGE_MASK))
 		goto out;
 
-	pgt = (page_table_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE) & PAGE_MASK);
+	pgt = (page_map_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE) & PAGE_MASK);
 	if (!pgt || !(pgt->entries[index2]))
 		goto out;
 
@@ -255,7 +253,7 @@ size_t virt_to_phys(size_t viraddr)
 out:
 	//kprintf("vir %p to phy %p\n", viraddr, ret);
 
-	spinlock_irqsave_unlock(&task->pgd_lock);
+	spinlock_irqsave_unlock(&task->page_lock);
 
 	return ret;
 }
@@ -263,11 +261,11 @@ out:
 size_t map_region(size_t viraddr, size_t phyaddr, uint32_t npages, uint32_t flags)
 {
 	task_t* task = per_core(current_task);
-	page_table_t* pgt;
+	page_map_t* pgt;
 	size_t index, i;
 	size_t ret;
 
-	if (BUILTIN_EXPECT(!task || !task->pgd, 0))
+	if (BUILTIN_EXPECT(!task || !task->page_map, 0))
 		return 0;
 
 	if (BUILTIN_EXPECT(!paging_enabled && (viraddr != phyaddr), 0))
@@ -276,7 +274,7 @@ size_t map_region(size_t viraddr, size_t phyaddr, uint32_t npages, uint32_t flag
 	if (flags & MAP_KERNEL_SPACE)
 		spinlock_lock(&kslock);
 	else
-		spinlock_irqsave_lock(&task->pgd_lock);
+		spinlock_irqsave_lock(&task->page_lock);
 
 	if (!viraddr) {
 		viraddr = vm_alloc(npages, flags);
@@ -292,10 +290,10 @@ size_t map_region(size_t viraddr, size_t phyaddr, uint32_t npages, uint32_t flag
 	for(i=0; i<npages; i++, viraddr+=PAGE_SIZE, phyaddr+=PAGE_SIZE) {
 		index = viraddr >> 22;
 
-		if (!(task->pgd->entries[index])) {
-			page_table_t* pgt_container;
+		if (!(task->page_map->entries[index])) {
+			page_map_t* pgt_container;
 
-			pgt = (page_table_t*) get_pages(1);
+			pgt = (page_map_t*) get_page();
 			if (BUILTIN_EXPECT(!pgt, 0)) {
 				kputs("map_address: out of memory\n");
 				ret = 0;
@@ -304,17 +302,17 @@ size_t map_region(size_t viraddr, size_t phyaddr, uint32_t npages, uint32_t flag
 
 			// set the new page table into the directory
 			if (flags & MAP_USER_SPACE)
-				task->pgd->entries[index] = (uint32_t)pgt|USER_TABLE;
+				task->page_map->entries[index] = (uint32_t)pgt|USER_TABLE;
 			else
-				task->pgd->entries[index] = (uint32_t)pgt|KERN_TABLE;
+				task->page_map->entries[index] = (uint32_t)pgt|KERN_TABLE;
 
 			// if paging is already enabled, we need to use the virtual address
 			if (paging_enabled)
 				// we already know the virtual address of the "page table container"
 				// (see file header)
-				pgt_container = (page_table_t*) ((KERNEL_SPACE - PAGE_SIZE) & PAGE_MASK);
+				pgt_container = (page_map_t*) ((KERNEL_SPACE - PAGE_SIZE) & PAGE_MASK);
 			else
-				pgt_container = (page_table_t*) (task->pgd->entries[(KERNEL_SPACE - PAGE_SIZE) >> 22] & PAGE_MASK);
+				pgt_container = (page_map_t*) (task->page_map->entries[(KERNEL_SPACE - PAGE_SIZE) >> 22] & PAGE_MASK);
 
 			if (BUILTIN_EXPECT(!pgt_container, 0)) {
 				kputs("map_address: internal error\n");
@@ -330,11 +328,11 @@ size_t map_region(size_t viraddr, size_t phyaddr, uint32_t npages, uint32_t flag
 				memset((void*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index*PAGE_SIZE) & PAGE_MASK), 0x00, PAGE_SIZE);
 			else
 				memset(pgt, 0x00, PAGE_SIZE);
-		} else pgt = (page_table_t*) (task->pgd->entries[index] & PAGE_MASK);
+		} else pgt = (page_map_t*) (task->page_map->entries[index] & PAGE_MASK);
 
 		/* convert physical address to virtual */
 		if (paging_enabled)
-			pgt = (page_table_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index*PAGE_SIZE) & PAGE_MASK);
+			pgt = (page_map_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index*PAGE_SIZE) & PAGE_MASK);
 
 		index = (viraddr >> 12) & 0x3FF;
 		if (pgt->entries[index] && !(flags & MAP_REMAP)) {
@@ -382,7 +380,7 @@ out:
 	if (flags & MAP_KERNEL_SPACE)
 		spinlock_unlock(&kslock);
 	else
-		spinlock_irqsave_unlock(&task->pgd_lock);
+		spinlock_irqsave_unlock(&task->page_lock);
 
 	return ret;
 }
@@ -392,18 +390,18 @@ int change_page_permissions(size_t start, size_t end, uint32_t flags)
 	uint32_t index1, index2, newflags;
 	size_t viraddr = start & 0xFFFFF000;
 	size_t phyaddr;
-	page_table_t* pgt;
-	page_dir_t* pgd;
+	page_map_t* pgt;
+	page_map_t* pgd;
 	task_t* task = per_core(current_task);
 
 	if (BUILTIN_EXPECT(!paging_enabled, 0))
 		return -EINVAL;
 
-	pgd = per_core(current_task)->pgd;
+	pgd = per_core(current_task)->page_map;
 	if (BUILTIN_EXPECT(!pgd, 0))
 		return -EINVAL;
 
-	spinlock_irqsave_lock(&task->pgd_lock);
+	spinlock_irqsave_lock(&task->page_lock);
 
 	while (viraddr < end)
 	{
@@ -411,7 +409,7 @@ int change_page_permissions(size_t start, size_t end, uint32_t flags)
 		index2 = (viraddr >> 12) & 0x3FF;
 
 		while ((viraddr < end) && (index2 < 1024)) {
-			pgt = (page_table_t*) (page_table_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE) & PAGE_MASK);
+			pgt = (page_map_t*) (page_map_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE) & PAGE_MASK);
 			if (pgt && pgt->entries[index2]) {
 				phyaddr = pgt->entries[index2] & PAGE_MASK;
 				newflags = pgt->entries[index2] & 0xFFF; // get old flags
@@ -448,7 +446,7 @@ int change_page_permissions(size_t start, size_t end, uint32_t flags)
 		}
 	}
 
-	spinlock_irqsave_unlock(&task->pgd_lock);
+	spinlock_irqsave_unlock(&task->page_lock);
 
 	return 0;
 }
|
@ -464,9 +462,9 @@ size_t vm_alloc(uint32_t npages, uint32_t flags)
|
||||||
uint32_t index1, index2, j;
|
uint32_t index1, index2, j;
|
||||||
size_t viraddr, i, ret = 0;
|
size_t viraddr, i, ret = 0;
|
||||||
size_t start, end;
|
size_t start, end;
|
||||||
page_table_t* pgt;
|
page_map_t* pgt;
|
||||||
|
|
||||||
if (BUILTIN_EXPECT(!task || !task->pgd || !paging_enabled, 0))
|
if (BUILTIN_EXPECT(!task || !task->page_map || !paging_enabled, 0))
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
if (flags & MAP_KERNEL_SPACE) {
|
if (flags & MAP_KERNEL_SPACE) {
|
||||||
|
@ -483,7 +481,7 @@ size_t vm_alloc(uint32_t npages, uint32_t flags)
|
||||||
if (flags & MAP_KERNEL_SPACE)
|
if (flags & MAP_KERNEL_SPACE)
|
||||||
spinlock_lock(&kslock);
|
spinlock_lock(&kslock);
|
||||||
else
|
else
|
||||||
spinlock_irqsave_lock(&task->pgd_lock);
|
spinlock_irqsave_lock(&task->page_lock);
|
||||||
|
|
||||||
viraddr = i = start;
|
viraddr = i = start;
|
||||||
j = 0;
|
j = 0;
|
||||||
|
@ -491,7 +489,7 @@ size_t vm_alloc(uint32_t npages, uint32_t flags)
|
||||||
index1 = i >> 22;
|
index1 = i >> 22;
|
||||||
index2 = (i >> 12) & 0x3FF;
|
index2 = (i >> 12) & 0x3FF;
|
||||||
|
|
||||||
pgt = (page_table_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE) & PAGE_MASK);
|
pgt = (page_map_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE) & PAGE_MASK);
|
||||||
if (!pgt || !(pgt->entries[index2])) {
|
if (!pgt || !(pgt->entries[index2])) {
|
||||||
i+=PAGE_SIZE;
|
i+=PAGE_SIZE;
|
||||||
j++;
|
j++;
|
||||||
|
@ -509,7 +507,7 @@ size_t vm_alloc(uint32_t npages, uint32_t flags)
|
||||||
if (flags & MAP_KERNEL_SPACE)
|
if (flags & MAP_KERNEL_SPACE)
|
||||||
spinlock_unlock(&kslock);
|
spinlock_unlock(&kslock);
|
||||||
else
|
else
|
||||||
spinlock_irqsave_unlock(&task->pgd_lock);
|
spinlock_irqsave_unlock(&task->page_lock);
|
||||||
|
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
@ -519,22 +517,22 @@ int unmap_region(size_t viraddr, uint32_t npages)
|
||||||
task_t* task = per_core(current_task);
|
task_t* task = per_core(current_task);
|
||||||
uint32_t i;
|
uint32_t i;
|
||||||
uint32_t index1, index2;
|
uint32_t index1, index2;
|
||||||
page_table_t* pgt;
|
page_map_t* pgt;
|
||||||
|
|
||||||
if (BUILTIN_EXPECT(!task || !task->pgd || !paging_enabled, 0))
|
if (BUILTIN_EXPECT(!task || !task->page_map || !paging_enabled, 0))
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
|
|
||||||
if (viraddr <= KERNEL_SPACE)
|
if (viraddr <= KERNEL_SPACE)
|
||||||
spinlock_lock(&kslock);
|
spinlock_lock(&kslock);
|
||||||
else
|
else
|
||||||
spinlock_irqsave_lock(&task->pgd_lock);
|
spinlock_irqsave_lock(&task->page_lock);
|
||||||
|
|
||||||
for(i=0; i<npages; i++, viraddr+=PAGE_SIZE)
|
for(i=0; i<npages; i++, viraddr+=PAGE_SIZE)
|
||||||
{
|
{
|
||||||
index1 = viraddr >> 22;
|
index1 = viraddr >> 22;
|
||||||
index2 = (viraddr >> 12) & 0x3FF;
|
index2 = (viraddr >> 12) & 0x3FF;
|
||||||
|
|
||||||
pgt = (page_table_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE) & PAGE_MASK);
|
pgt = (page_map_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE) & PAGE_MASK);
|
||||||
if (!pgt)
|
if (!pgt)
|
||||||
continue;
|
continue;
|
||||||
pgt->entries[index2] &= ~PG_PRESENT;
|
pgt->entries[index2] &= ~PG_PRESENT;
|
||||||
|
@ -548,7 +546,7 @@ int unmap_region(size_t viraddr, uint32_t npages)
|
||||||
if (viraddr <= KERNEL_SPACE)
|
if (viraddr <= KERNEL_SPACE)
|
||||||
spinlock_unlock(&kslock);
|
spinlock_unlock(&kslock);
|
||||||
else
|
else
|
||||||
spinlock_irqsave_unlock(&task->pgd_lock);
|
spinlock_irqsave_unlock(&task->page_lock);
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
@@ -558,22 +556,22 @@ int vm_free(size_t viraddr, uint32_t npages)
 	task_t* task = per_core(current_task);
 	uint32_t i;
 	uint32_t index1, index2;
-	page_table_t* pgt;
+	page_map_t* pgt;
 
-	if (BUILTIN_EXPECT(!task || !task->pgd || !paging_enabled, 0))
+	if (BUILTIN_EXPECT(!task || !task->page_map || !paging_enabled, 0))
 		return -EINVAL;
 
 	if (viraddr <= KERNEL_SPACE)
 		spinlock_lock(&kslock);
 	else
-		spinlock_irqsave_lock(&task->pgd_lock);
+		spinlock_irqsave_lock(&task->page_lock);
 
 	for(i=0; i<npages; i++, viraddr+=PAGE_SIZE)
 	{
 		index1 = viraddr >> 22;
 		index2 = (viraddr >> 12) & 0x3FF;
 
-		pgt = (page_table_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE) & PAGE_MASK);
+		pgt = (page_map_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE) & PAGE_MASK);
 		if (!pgt)
 			continue;
 		pgt->entries[index2] = 0;
@@ -584,7 +582,7 @@ int vm_free(size_t viraddr, uint32_t npages)
 	if (viraddr <= KERNEL_SPACE)
 		spinlock_unlock(&kslock);
 	else
-		spinlock_irqsave_unlock(&task->pgd_lock);
+		spinlock_irqsave_unlock(&task->page_lock);
 
 	return 0;
 }
@@ -593,8 +591,8 @@ int print_paging_tree(size_t viraddr)
 {
 	task_t* task = per_core(current_task);
 	uint32_t index1, index2;
-	page_dir_t* pgd = NULL;
-	page_table_t* pgt = NULL;
+	page_map_t* pgd = NULL;
+	page_map_t* pgt = NULL;
 
 	if (BUILTIN_EXPECT(!viraddr, 0))
 		return -EINVAL;
@@ -602,20 +600,20 @@ int print_paging_tree(size_t viraddr)
 	index1 = viraddr >> 22;
 	index2 = (viraddr >> 12) & 0x3FF;
 
-	spinlock_irqsave_lock(&task->pgd_lock);
+	spinlock_irqsave_lock(&task->page_lock);
 
 	kprintf("Paging dump of address 0x%x\n", viraddr);
-	pgd = task->pgd;
+	pgd = task->page_map;
 	kprintf("\tPage directory entry %u: ", index1);
 	if (pgd) {
 		kprintf("0x%0x\n", pgd->entries[index1]);
-		pgt = (page_table_t*) (pgd->entries[index1] & PAGE_MASK);
+		pgt = (page_map_t*) (pgd->entries[index1] & PAGE_MASK);
 	} else
 		kputs("invalid page directory\n");
 
 	/* convert physical address to virtual */
 	if (paging_enabled && pgt)
-		pgt = (page_table_t*) (KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE);
+		pgt = (page_map_t*) (KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE);
 
 	kprintf("\tPage table entry %u: ", index2);
 	if (pgt)
@@ -623,7 +621,7 @@ int print_paging_tree(size_t viraddr)
 	else
 		kputs("invalid page table\n");
 
-	spinlock_irqsave_unlock(&task->pgd_lock);
+	spinlock_irqsave_unlock(&task->page_lock);
 
 	return 0;
 }
@@ -631,12 +629,12 @@ int print_paging_tree(size_t viraddr)
 static void pagefault_handler(struct state *s)
 {
 	task_t* task = per_core(current_task);
-	page_dir_t* pgd = task->pgd;
-	page_table_t* pgt = NULL;
 	size_t viraddr = read_cr2();
 	size_t phyaddr;
 #ifdef CONFIG_ROCKCREEK
 	uint32_t index1, index2;
+	page_map_t* pgd = task->page_map;
+	page_map_t* pgt = NULL;
 #endif
 
 	if ((viraddr >= task->start_heap) && (viraddr <= task->end_heap) && (viraddr > KERNEL_SPACE)) {
@@ -661,7 +659,7 @@ static void pagefault_handler(struct state *s)
 	index2 = (viraddr >> 12) & 0x3FF;
 	if (!pgd || !(pgd->entries[index1] & PAGE_MASK))
 		goto default_handler;
-	pgt = (page_table_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE) & PAGE_MASK);
+	pgt = (page_map_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE) & PAGE_MASK);
 	if (!pgt || !(pgt->entries[index2]))
 		goto default_handler;
 	if (pgt->entries[index2] & PG_SVM_INIT) {
@@ -687,7 +685,7 @@ default_handler:
 int arch_paging_init(void)
 {
 	uint32_t i, npages, index1, index2;
-	page_table_t* pgt;
+	page_map_t* pgt;
 	size_t viraddr;
 
 	// uninstall default handler and install our own
@@ -703,15 +701,15 @@ int arch_paging_init(void)
 	index2 = (viraddr >> 12) & 0x3FF;
 
 	// now, we create a self reference
-	per_core(current_task)->pgd->entries[index1] = (((size_t) pgt) & PAGE_MASK)|KERN_TABLE;
-	pgt->entries[index2] = ((size_t) pgt & 0xFFFFF000)|KERN_PAGE;
+	per_core(current_task)->page_map->entries[index1] = (((size_t) pgt) & PAGE_MASK)|KERN_TABLE;
+	pgt->entries[index2] = ((size_t) pgt & PAGE_MASK)|KERN_PAGE;
 
 	// create the other PGTs for the kernel space
 	for(i=0; i<KERNEL_SPACE/(1024*PAGE_SIZE)-1; i++) {
 		size_t phyaddr = boot_pgt+i;
 
-		memset((void*) phyaddr, 0x00, sizeof(page_table_t));
-		per_core(current_task)->pgd->entries[i] = (phyaddr & PAGE_MASK)|KERN_TABLE;
+		memset((void*) phyaddr, 0x00, sizeof(page_map_t));
+		per_core(current_task)->page_map->entries[i] = (phyaddr & PAGE_MASK)|KERN_TABLE;
 		pgt->entries[i] = (phyaddr & PAGE_MASK)|KERN_PAGE;
 	}
 
@@ -738,9 +736,7 @@ int arch_paging_init(void)
 #endif
 
 #ifdef CONFIG_MULTIBOOT
-	/*
-	 * of course, mb_info has to map into the kernel space
-	 */
+	// map mb_info into the kernel space
 	if (mb_info)
 		map_region((size_t) mb_info & PAGE_MASK, (size_t) mb_info & PAGE_MASK, 1, MAP_KERNEL_SPACE);
 
@@ -805,7 +801,7 @@ int arch_paging_init(void)
 	kprintf("Map FPGA regsiters at 0x%x\n", viraddr);
 #endif
 
-	/* enable paging */
+	// enable paging
 	write_cr3((uint32_t) &boot_pgd);
 	i = read_cr0();
 	i = i | (1 << 31);
@@ -31,21 +31,15 @@
 #include <asm/irq.h>
 #include <asm/multiboot.h>
 #include <asm/apic.h>
-#ifdef CONFIG_ROCKCREEK
-#include <asm/RCCE_lib.h>
-#include <asm/SCC_API.h>
-#include <asm/svm.h>
-#include <asm/icc.h>
-#endif
 
 /*
  * Virtual Memory Layout of the standard configuration
  * (1 GB kernel space)
  *
- * 0x00000000 - 0x000FFFFF: reserved for IO devices (16MB)
- * 0x00100000 - 0x0DEADFFF: Kernel (size depends on the configuration) (221MB)
- * 0x0DEAE000 - 0x3FFFFFFF: Kernel heap
- *
+ * 0x000000000000 - 0x0000000FFFFF: reserved for IO devices (16MB)
+ * 0x000000100000 - 0x00000DEADFFF: Kernel (size depends on the configuration) (221MB)
+ * 0x00000DEAE000 - 0x00003FFFFFFF: Kernel heap
+ * 0xFF8000000000 - 0xFFFFFFFFFFFF: Paging structures are mapped in this region (1GB)
 */
 
 /*
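
A quick check on the new top region (a sketch; the shift mirrors the 4-level walk used later in this file):

	/* 0xFF8000000000 >> 39 == 0x1FF == 511: the window reserved for the
	 * paging structures corresponds exactly to the last PML4 entry. */
	_Static_assert((0xFF8000000000ULL >> 39) == 511,
	               "paging-structure window sits in the last PML4 slot");
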
@@ -55,17 +49,22 @@
 extern const void kernel_start;
 extern const void kernel_end;
 
-// boot task's page directory and p:age directory lock
-extern page_dir_t boot_pgd; // TODO: initialization done in entry64.asm
+// boot task's page directory and page directory lock
+extern page_map_t boot_pml4;
 static spinlock_t kslock = SPINLOCK_INIT;
 static int paging_enabled = 0;
 
-page_dir_t* get_boot_pgd(void)
+/*static page_map_t boot_pml4 = {{[0 ... MAP_ENTRIES-1] = 0}};
+static page_map_t boot_pdpt = {{[0 ... MAP_ENTRIES-1] = 0}};
+static page_map_t boot_pgd = {{[0 ... MAP_ENTRIES-1] = 0}};
+static page_map_t boot_pgt = {{[0 ... MAP_ENTRIES-1] = 0}};*/
+
+page_map_t* get_boot_page_map(void)
 {
-	return &boot_pgd;
+	return &boot_pml4;
 }
 
-int create_pgd(task_t* task, int copy)
+int create_page_map(task_t* task, int copy)
 {
 	// TODO: Currently, we support only kernel tasks
 	// => all tasks are able to use the same pgd
@@ -73,18 +72,15 @@ int create_pgd(task_t* task, int copy)
 	if (BUILTIN_EXPECT(!paging_enabled, 0))
 		return -EINVAL;
 
-	task->pgd = get_boot_pgd();
+	task->page_map = get_boot_page_map();
 
 	return 0;
 }
 
-/*
- * drops all page frames and the PGD of a user task
- */
-int drop_pgd(void)
+int drop_page_map(void)
 {
 #if 0
-	page_dir_t* pgd = per_core(current_task)->pgd;
+	page_map_t* pgd = per_core(current_task)->page_map;
 	size_t phy_pgd = virt_to_phys((size_t) pgd);
 	task_t* task = per_core(current_task);
 	uint32_t i;
@@ -92,7 +88,7 @@ int drop_pgd(void)
 	if (BUILTIN_EXPECT(pgd == &boot_pgd, 0))
 		return -EINVAL;
 
-	spinlock_lock(&task->pgd_lock);
+	spinlock_lock(&task->page_lock);
 
 	for(i=0; i<1024; i++) {
 		if (pgd->entries[i] & PG_USER) {
@@ -104,9 +100,9 @@ int drop_pgd(void)
 	// freeing the page directory
 	put_page(phy_pgd);
 
-	task->pgd = NULL;
+	task->page_map = NULL;
 
-	spinlock_unlock(&task->pgd_lock);
+	spinlock_unlock(&task->page_lock);
 #endif
 
 	return 0;
@@ -115,36 +111,36 @@ int drop_pgd(void)
 size_t virt_to_phys(size_t viraddr)
 {
 	task_t* task = per_core(current_task);
-	uint16_t idx_pd4 = (viraddr >> 39) & 0x1FF;
-	uint16_t idx_dirp = (viraddr >> 30) & 0x1FF;
-	uint16_t idx_dir = (viraddr >> 21) & 0x1FF;
-	uint16_t idx_table = (viraddr >> 12) & 0x1FF;
-	page_table_t* pgt;
+	page_map_t* pdpt, * pgd , * pgt;
+	uint16_t index_pml4 = (viraddr >> 39) & 0x1FF;
+	uint16_t index_pdpt = (viraddr >> 30) & 0x1FF;
+	uint16_t index_pgd = (viraddr >> 21) & 0x1FF;
+	uint16_t index_pgt = (viraddr >> 12) & 0x1FF;
 	size_t ret = 0;
 
 	if (!paging_enabled)
 		return viraddr;
 
-	if (BUILTIN_EXPECT(!task || !task->pgd, 0))
+	if (BUILTIN_EXPECT(!task || !task->page_map, 0))
 		return 0;
 
-	spinlock_irqsave_lock(&task->pgd_lock);
+	spinlock_irqsave_lock(&task->page_lock);
 
 	// TODO: Currently, we allocate pages only in kernel space.
 	// => physical address of the page table is identical of the virtual address
-	pgt = (page_table_t*) (task->pgd->entries[idx_pd4] & PAGE_MASK);
-	if (!pgt)
-		goto out;
-
-	pgt = (page_table_t*) (pgt->entries[idx_dirp] & PAGE_MASK);
-	if (!pgt)
-		goto out;
-
-	pgt = (page_table_t*) (pgt->entries[idx_dir] & PAGE_MASK);
-	if (!pgt)
-		goto out;
-
-	ret = (size_t) (pgt->entries[idx_table] & PAGE_MASK);
+	pdpt = (page_map_t*) (task->page_map->entries[index_pml4] & PAGE_MASK);
+	if (!pdpt)
+		goto out;
+
+	pgd = (page_map_t*) (pdpt->entries[index_pdpt] & PAGE_MASK);
+	if (!pgd)
+		goto out;
+
+	pgt = (page_map_t*) (pgd->entries[index_pgd] & PAGE_MASK);
+	if (!pgt)
+		goto out;
+
+	ret = (size_t) (pgt->entries[index_pgt] & PAGE_MASK);
 	if (!ret)
 		goto out;
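
For reference, a self-contained sketch (not from the commit) of the address split performed above; the shifts and masks are exactly those of the new virt_to_phys, and the sample address is arbitrary:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t viraddr = 0x00007f12345a6000ULL;      /* arbitrary canonical address */
		uint16_t index_pml4 = (viraddr >> 39) & 0x1FF; /* level 4: PML4 */
		uint16_t index_pdpt = (viraddr >> 30) & 0x1FF; /* level 3: PDPT */
		uint16_t index_pgd  = (viraddr >> 21) & 0x1FF; /* level 2: page directory */
		uint16_t index_pgt  = (viraddr >> 12) & 0x1FF; /* level 1: page table */
		uint32_t offset     = viraddr & 0xFFF;         /* byte offset in the page */

		printf("PML4 %u, PDPT %u, PGD %u, PGT %u, offset 0x%x\n",
		       index_pml4, index_pdpt, index_pgd, index_pgt, offset);
		return 0;
	}
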
@@ -152,7 +148,7 @@ size_t virt_to_phys(size_t viraddr)
 out:
 	//kprintf("vir %p to phy %p\n", viraddr, ret);
 
-	spinlock_irqsave_unlock(&task->pgd_lock);
+	spinlock_irqsave_unlock(&task->page_lock);
 
 	return ret;
 }
@@ -160,10 +156,12 @@ out:
 size_t map_region(size_t viraddr, size_t phyaddr, uint32_t npages, uint32_t flags)
 {
 	task_t* task = per_core(current_task);
-	page_table_t* pgt;
+	page_map_t* pdpt, * pgd, * pgt;
+	uint16_t index_pml4, index_pdpt;
+	uint16_t index_pgd, index_pgt;
 	size_t i, ret;
 
-	if (BUILTIN_EXPECT(!task || !task->pgd, 0))
+	if (BUILTIN_EXPECT(!task || !task->page_map, 0))
 		return 0;
 
 	if (BUILTIN_EXPECT(!paging_enabled && (viraddr != phyaddr), 0))
@@ -172,7 +170,7 @@ size_t map_region(size_t viraddr, size_t phyaddr, uint32_t npages, uint32_t flag
 	if (flags & MAP_KERNEL_SPACE)
 		spinlock_lock(&kslock);
 	else
-		spinlock_irqsave_lock(&task->pgd_lock);
+		spinlock_irqsave_lock(&task->page_lock);
 
 	if (!viraddr) {
 		viraddr = vm_alloc(npages, flags);
@@ -185,26 +183,26 @@ size_t map_region(size_t viraddr, size_t phyaddr, uint32_t npages, uint32_t flag
 
 	ret = viraddr;
 	for(i=0; i<npages; i++, viraddr+=PAGE_SIZE, phyaddr+=PAGE_SIZE) {
-		uint16_t idx_pd4 = (viraddr >> 39) & 0x1FF;
-		uint16_t idx_dirp = (viraddr >> 30) & 0x1FF;
-		uint16_t idx_dir = (viraddr >> 21) & 0x1FF;
-		uint16_t idx_table = (viraddr >> 12) & 0x1FF;
+		index_pml4 = (viraddr >> 39) & 0x1FF;
+		index_pdpt = (viraddr >> 30) & 0x1FF;
+		index_pgd = (viraddr >> 21) & 0x1FF;
+		index_pgt = (viraddr >> 12) & 0x1FF;
 
-		pgt = (page_table_t*) (task->pgd->entries[idx_pd4] & PAGE_MASK);
+		pdpt = (page_map_t*) (task->page_map->entries[index_pml4] & PAGE_MASK);
 		if (!pgt) {
 			kputs("map_region: out of memory\n");
 			ret = 0;
 			goto out;
 		}
 
-		pgt = (page_table_t*) (pgt->entries[idx_dirp] & PAGE_MASK);
-		if (!pgt) {
+		pgd = (page_map_t*) (pdpt->entries[index_pdpt] & PAGE_MASK);
+		if (!pgd) {
 			kputs("map_region: out of memory\n");
 			ret = 0;
 			goto out;
 		}
 
-		pgt = (page_table_t*) (pgt->entries[idx_dir] & PAGE_MASK);
+		pgt = (page_map_t*) (pgd->entries[index_pgd] & PAGE_MASK);
 		if (!pgt) {
 			kputs("map_region: out of memory\n");
 			ret = 0;
@@ -215,27 +213,27 @@ size_t map_region(size_t viraddr, size_t phyaddr, uint32_t npages, uint32_t flag
 		// Currently, we allocate pages only in kernel space.
 		// => physical address of the page table is identical of the virtual address
 		//if (paging_enabled)
-		//	pgt = (page_table_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index*PAGE_SIZE) & PAGE_MASK);
+		//	pgt = (page_map_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index*PAGE_SIZE) & PAGE_MASK);
 
-		if (pgt->entries[idx_table] && !(flags & MAP_REMAP)) {
+		if (pgt->entries[index_pgt] && !(flags & MAP_REMAP)) {
 			kprintf("0x%x is already mapped\n", viraddr);
 			ret = 0;
 			goto out;
 		}
 
 		if (flags & MAP_USER_SPACE)
-			pgt->entries[idx_table] = USER_PAGE|(phyaddr & PAGE_MASK);
+			pgt->entries[index_pgt] = USER_PAGE|(phyaddr & PAGE_MASK);
 		else
-			pgt->entries[idx_table] = KERN_PAGE|(phyaddr & PAGE_MASK);
+			pgt->entries[index_pgt] = KERN_PAGE|(phyaddr & PAGE_MASK);
 
 		if (flags & MAP_NO_CACHE)
-			pgt->entries[idx_table] |= PG_PCD;
+			pgt->entries[index_pgt] |= PG_PCD;
 
 		if (flags & MAP_NO_ACCESS)
-			pgt->entries[idx_table] &= ~PG_PRESENT;
+			pgt->entries[index_pgt] &= ~PG_PRESENT;
 
 		if (flags & MAP_WT)
-			pgt->entries[idx_table] |= PG_PWT;
+			pgt->entries[index_pgt] |= PG_PWT;
 
 		if (flags & MAP_USER_SPACE)
 			atomic_int32_inc(&task->user_usage);
@@ -247,7 +245,7 @@ out:
 	if (flags & MAP_KERNEL_SPACE)
 		spinlock_unlock(&kslock);
 	else
-		spinlock_irqsave_unlock(&task->pgd_lock);
+		spinlock_irqsave_unlock(&task->page_lock);
 
 	return ret;
 }
@@ -258,18 +256,18 @@ int change_page_permissions(size_t start, size_t end, uint32_t flags)
 	uint32_t index1, index2, newflags;
 	size_t viraddr = start & PAGE_MASK;
 	size_t phyaddr;
-	page_table_t* pgt;
-	page_dir_t* pgd;
+	page_map_t* pgt;
+	page_map_t* pgd;
 	task_t* task = per_core(current_task);
 
 	if (BUILTIN_EXPECT(!paging_enabled, 0))
 		return -EINVAL;
 
-	pgd = per_core(current_task)->pgd;
+	pgd = per_core(current_task)->page_map;
 	if (BUILTIN_EXPECT(!pgd, 0))
 		return -EINVAL;
 
-	spinlock_lock(&task->pgd_lock);
+	spinlock_lock(&task->page_lock);
 
 	while (viraddr < end)
 	{
@@ -277,7 +275,7 @@ int change_page_permissions(size_t start, size_t end, uint32_t flags)
 		index2 = (viraddr >> 12) & 0x3FF;
 
 		while ((viraddr < end) && (index2 < 1024)) {
-			pgt = (page_table_t*) (page_table_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE) & PAGE_MASK);
+			pgt = (page_map_t*) (page_map_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE) & PAGE_MASK);
 			if (pgt && pgt->entries[index2]) {
 				phyaddr = pgt->entries[index2] & PAGE_MASK;
 				newflags = pgt->entries[index2] & 0xFFF; // get old flags
@@ -292,16 +290,8 @@ int change_page_permissions(size_t start, size_t end, uint32_t flags)
 				// update flags
 				if (!(flags & VMA_WRITE)) {
 					newflags &= ~PG_RW;
-#ifdef CONFIG_ROCKCREEK
-					if (newflags & (PG_SVM_STRONG|PG_SVM_LAZYRELEASE))
-						newflags &= ~PG_MPE;
-#endif
 				} else {
 					newflags |= PG_RW;
-#ifdef CONFIG_ROCKCREEK
-					if (newflags & (PG_SVM_STRONG|PG_SVM_LAZYRELEASE))
-						newflags |= PG_MPE;
-#endif
 				}
 
 				pgt->entries[index2] = (newflags & 0xFFF) | (phyaddr & PAGE_MASK);
@@ -314,7 +304,7 @@ int change_page_permissions(size_t start, size_t end, uint32_t flags)
 		}
 	}
 
-	spinlock_unlock(&task->pgd_lock);
+	spinlock_unlock(&task->page_lock);
 #endif
 
 	return -EINVAL;
@@ -330,9 +320,11 @@ size_t vm_alloc(uint32_t npages, uint32_t flags)
 	task_t* task = per_core(current_task);
 	size_t viraddr, i, j, ret = 0;
 	size_t start, end;
-	page_table_t* pgt;
+	page_map_t* pdpt, * pgd, * pgt;
+	uint16_t index_pml4, index_pdpt;
+	uint16_t index_pgd, index_pgt;
 
-	if (BUILTIN_EXPECT(!task || !task->pgd || !paging_enabled, 0))
+	if (BUILTIN_EXPECT(!task || !task->page_map || !paging_enabled, 0))
 		return 0;
 
 	if (flags & MAP_KERNEL_SPACE) {
@@ -349,40 +341,40 @@ size_t vm_alloc(uint32_t npages, uint32_t flags)
 	if (flags & MAP_KERNEL_SPACE)
 		spinlock_lock(&kslock);
 	else
-		spinlock_irqsave_lock(&task->pgd_lock);
+		spinlock_irqsave_lock(&task->page_lock);
 
 	viraddr = i = start;
 	j = 0;
 	do {
-		uint16_t idx_pd4 = (viraddr >> 39) & 0x1FF;
-		uint16_t idx_dirp = (viraddr >> 30) & 0x1FF;
-		uint16_t idx_dir = (viraddr >> 21) & 0x1FF;
-		uint16_t idx_table = (viraddr >> 12) & 0x1FF;
+		index_pml4 = (viraddr >> 39) & 0x1FF;
+		index_pdpt = (viraddr >> 30) & 0x1FF;
+		index_pgd = (viraddr >> 21) & 0x1FF;
+		index_pgt = (viraddr >> 12) & 0x1FF;
 
 		// Currently, we allocate pages only in kernel space.
 		// => physical address of the page table is identical of the virtual address
-		pgt = (page_table_t*) (task->pgd->entries[idx_pd4] & PAGE_MASK);
-		if (!pgt) {
-			i += (size_t)PGT_ENTRIES*PGT_ENTRIES*PGT_ENTRIES*PAGE_SIZE;
-			j += PGT_ENTRIES*PGT_ENTRIES*PGT_ENTRIES;
+		pdpt = (page_map_t*) (task->page_map->entries[index_pml4] & PAGE_MASK);
+		if (!pdpt) {
+			i += (size_t)MAP_ENTRIES*MAP_ENTRIES*MAP_ENTRIES*PAGE_SIZE;
+			j += MAP_ENTRIES*MAP_ENTRIES*MAP_ENTRIES;
 			continue;
 		}
 
-		pgt = (page_table_t*) (pgt->entries[idx_dirp] & PAGE_MASK);
-		if (!pgt) {
-			i += PGT_ENTRIES*PGT_ENTRIES*PAGE_SIZE;
-			j += PGT_ENTRIES*PGT_ENTRIES;
+		pgd = (page_map_t*) (pdpt->entries[index_pdpt] & PAGE_MASK);
+		if (!pgd) {
+			i += MAP_ENTRIES*MAP_ENTRIES*PAGE_SIZE;
+			j += MAP_ENTRIES*MAP_ENTRIES;
 			continue;
 		}
 
-		pgt = (page_table_t*) (pgt->entries[idx_dir] & PAGE_MASK);
+		pgt = (page_map_t*) (pgd->entries[index_pgd] & PAGE_MASK);
 		if (!pgt) {
-			i += PGT_ENTRIES*PAGE_SIZE;
-			j += PGT_ENTRIES;
+			i += MAP_ENTRIES*PAGE_SIZE;
+			j += MAP_ENTRIES;
 			continue;
 		}
 
-		if (!(pgt->entries[idx_table])) {
+		if (!(pgt->entries[index_pgt])) {
 			i += PAGE_SIZE;
 			j++;
 		} else {
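
Worked numbers for the skip amounts in the loop above (a sketch; MAP_ENTRIES = 512, PAGE_SIZE = 4096 as defined for x86-64):

	/* An empty entry at a given level lets the scan skip everything that
	 * level would have mapped:
	 *   empty PGD entry (no PGT):          512 * 4 KiB =   2 MiB
	 *   empty PDPT entry (no PGD):   512 * 512 * 4 KiB =   1 GiB
	 *   empty PML4 entry (no PDPT): 512^3      * 4 KiB = 512 GiB
	 * The (size_t) cast on the largest step is essential:
	 * 512*512*512*4096 overflows 32-bit integer arithmetic. */
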
@ -399,7 +391,7 @@ size_t vm_alloc(uint32_t npages, uint32_t flags)
|
||||||
if (flags & MAP_KERNEL_SPACE)
|
if (flags & MAP_KERNEL_SPACE)
|
||||||
spinlock_unlock(&kslock);
|
spinlock_unlock(&kslock);
|
||||||
else
|
else
|
||||||
spinlock_irqsave_unlock(&task->pgd_lock);
|
spinlock_irqsave_unlock(&task->page_lock);
|
||||||
|
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
@@ -407,52 +399,52 @@ size_t vm_alloc(uint32_t npages, uint32_t flags)
 int unmap_region(size_t viraddr, uint32_t npages)
 {
 	task_t* task = per_core(current_task);
-	page_table_t* pgt;
+	page_map_t* pdpt, * pgd, * pgt;
 	size_t i;
-	uint16_t idx_pd4, idx_dirp;
-	uint16_t idx_dir, idx_table;
+	uint16_t index_pml4, index_pdpt;
+	uint16_t index_pgd, index_pgt;
 
-	if (BUILTIN_EXPECT(!task || !task->pgd || !paging_enabled, 0))
+	if (BUILTIN_EXPECT(!task || !task->page_map || !paging_enabled, 0))
 		return -EINVAL;
 
 	if (viraddr <= KERNEL_SPACE)
 		spinlock_lock(&kslock);
 	else
-		spinlock_irqsave_lock(&task->pgd_lock);
+		spinlock_irqsave_lock(&task->page_lock);
 
 	i = 0;
 	while(i<npages)
 	{
-		idx_pd4 = (viraddr >> 39) & 0x1FF;
-		idx_dirp = (viraddr >> 30) & 0x1FF;
-		idx_dir = (viraddr >> 21) & 0x1FF;
-		idx_table = (viraddr >> 12) & 0x1FF;
+		index_pml4 = (viraddr >> 39) & 0x1FF;
+		index_pdpt = (viraddr >> 30) & 0x1FF;
+		index_pgd = (viraddr >> 21) & 0x1FF;
+		index_pgt = (viraddr >> 12) & 0x1FF;
 
 		// Currently, we allocate pages only in kernel space.
 		// => physical address of the page table is identical to the virtual address
-		pgt = (page_table_t*) (task->pgd->entries[idx_pd4] & PAGE_MASK);
-		if (!pgt) {
-			viraddr += (size_t) PGT_ENTRIES*PGT_ENTRIES*PGT_ENTRIES*PAGE_SIZE;
-			i += PGT_ENTRIES*PGT_ENTRIES*PGT_ENTRIES;
+		pdpt = (page_map_t*) (task->page_map->entries[index_pml4] & PAGE_MASK);
+		if (!pdpt) {
+			viraddr += (size_t) MAP_ENTRIES*MAP_ENTRIES*MAP_ENTRIES*PAGE_SIZE;
+			i += MAP_ENTRIES*MAP_ENTRIES*MAP_ENTRIES;
 			continue;
 		}
 
-		pgt = (page_table_t*) (pgt->entries[idx_dirp] & PAGE_MASK);
-		if (!pgt) {
-			viraddr += PGT_ENTRIES*PGT_ENTRIES*PAGE_SIZE;
-			i += PGT_ENTRIES*PGT_ENTRIES;
+		pgd = (page_map_t*) (pdpt->entries[index_pdpt] & PAGE_MASK);
+		if (!pgd) {
+			viraddr += MAP_ENTRIES*MAP_ENTRIES*PAGE_SIZE;
+			i += MAP_ENTRIES*MAP_ENTRIES;
 			continue;
 		}
 
-		pgt = (page_table_t*) (pgt->entries[idx_dir] & PAGE_MASK);
+		pgt = (page_map_t*) (pgd->entries[index_pgd] & PAGE_MASK);
 		if (!pgt) {
-			viraddr += PGT_ENTRIES*PAGE_SIZE;
-			i += PGT_ENTRIES;
+			viraddr += MAP_ENTRIES*PAGE_SIZE;
+			i += MAP_ENTRIES;
 			continue;
 		}
 
-		if (pgt->entries[idx_table])
-			pgt->entries[idx_table] &= ~PG_PRESENT;
+		if (pgt->entries[index_pgt])
+			pgt->entries[index_pgt] &= ~PG_PRESENT;
 
 		viraddr += PAGE_SIZE;
 		i++;
@@ -466,7 +458,7 @@ int unmap_region(size_t viraddr, uint32_t npages)
 	if (viraddr <= KERNEL_SPACE)
 		spinlock_unlock(&kslock);
 	else
-		spinlock_irqsave_unlock(&task->pgd_lock);
+		spinlock_irqsave_unlock(&task->page_lock);
 
 	return 0;
 }
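Reviewer note: the four 9-bit indices extracted in the loop above decompose a 48-bit x86-64 virtual address: bits 39-47 select the PML4 entry, 30-38 the PDPT entry, 21-29 the PGD entry, 12-20 the PGT entry, and bits 0-11 remain as the page offset. A standalone sketch of the same arithmetic (the example address is arbitrary):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t viraddr = 0x00007F80DEAD1000ULL;  /* arbitrary example address */

	/* Same shifts and mask as in unmap_region()/vm_free(). */
	unsigned index_pml4 = (viraddr >> 39) & 0x1FF;
	unsigned index_pdpt = (viraddr >> 30) & 0x1FF;
	unsigned index_pgd  = (viraddr >> 21) & 0x1FF;
	unsigned index_pgt  = (viraddr >> 12) & 0x1FF;
	unsigned offset     = viraddr & 0xFFF;

	printf("pml4=%u pdpt=%u pgd=%u pgt=%u offset=0x%x\n",
	       index_pml4, index_pdpt, index_pgd, index_pgt, offset);
	return 0;
}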
@@ -474,52 +466,52 @@ int unmap_region(size_t viraddr, uint32_t npages)
 int vm_free(size_t viraddr, uint32_t npages)
 {
 	task_t* task = per_core(current_task);
-	page_table_t* pgt;
+	page_map_t* pdpt, * pgd, * pgt;
 	size_t i;
-	uint16_t idx_pd4, idx_dirp;
-	uint16_t idx_dir, idx_table;
+	uint16_t index_pml4, index_pdpt;
+	uint16_t index_pgd, index_pgt;
 
-	if (BUILTIN_EXPECT(!task || !task->pgd || !paging_enabled, 0))
+	if (BUILTIN_EXPECT(!task || !task->page_map || !paging_enabled, 0))
 		return -EINVAL;
 
 	if (viraddr <= KERNEL_SPACE)
 		spinlock_lock(&kslock);
 	else
-		spinlock_irqsave_lock(&task->pgd_lock);
+		spinlock_irqsave_lock(&task->page_lock);
 
 	i = 0;
 	while(i<npages)
 	{
-		idx_pd4 = (viraddr >> 39) & 0x1FF;
-		idx_dirp = (viraddr >> 30) & 0x1FF;
-		idx_dir = (viraddr >> 21) & 0x1FF;
-		idx_table = (viraddr >> 12) & 0x1FF;
+		index_pml4 = (viraddr >> 39) & 0x1FF;
+		index_pdpt = (viraddr >> 30) & 0x1FF;
+		index_pgd = (viraddr >> 21) & 0x1FF;
+		index_pgt = (viraddr >> 12) & 0x1FF;
 
 		// Currently, we allocate pages only in kernel space.
 		// => physical address of the page table is identical to the virtual address
-		pgt = (page_table_t*) (task->pgd->entries[idx_pd4] & PAGE_MASK);
-		if (!pgt) {
-			viraddr += (size_t) PGT_ENTRIES*PGT_ENTRIES*PGT_ENTRIES*PAGE_SIZE;
-			i += PGT_ENTRIES*PGT_ENTRIES*PGT_ENTRIES;
+		pdpt = (page_map_t*) (task->page_map->entries[index_pml4] & PAGE_MASK);
+		if (!pdpt) {
+			viraddr += (size_t) MAP_ENTRIES*MAP_ENTRIES*MAP_ENTRIES*PAGE_SIZE;
+			i += MAP_ENTRIES*MAP_ENTRIES*MAP_ENTRIES;
 			continue;
 		}
 
-		pgt = (page_table_t*) (pgt->entries[idx_dirp] & PAGE_MASK);
-		if (!pgt) {
-			viraddr += PGT_ENTRIES*PGT_ENTRIES*PAGE_SIZE;
-			i += PGT_ENTRIES*PGT_ENTRIES;
+		pgd = (page_map_t*) (pdpt->entries[index_pdpt] & PAGE_MASK);
+		if (!pgd) {
+			viraddr += MAP_ENTRIES*MAP_ENTRIES*PAGE_SIZE;
+			i += MAP_ENTRIES*MAP_ENTRIES;
 			continue;
 		}
 
-		pgt = (page_table_t*) (pgt->entries[idx_dir] & PAGE_MASK);
+		pgt = (page_map_t*) (pgd->entries[index_pgd] & PAGE_MASK);
 		if (!pgt) {
-			viraddr += PGT_ENTRIES*PAGE_SIZE;
-			i += PGT_ENTRIES;
+			viraddr += MAP_ENTRIES*PAGE_SIZE;
+			i += MAP_ENTRIES;
 			continue;
 		}
 
-		if (pgt->entries[idx_table])
-			pgt->entries[idx_table] = 0;
+		if (pgt->entries[index_pgt])
+			pgt->entries[index_pgt] = 0;
 
 		viraddr += PAGE_SIZE;
 		i++;
@@ -530,7 +522,7 @@ int vm_free(size_t viraddr, uint32_t npages)
 	if (viraddr <= KERNEL_SPACE)
 		spinlock_unlock(&kslock);
 	else
-		spinlock_irqsave_unlock(&task->pgd_lock);
+		spinlock_irqsave_unlock(&task->page_lock);
 
 	return 0;
 }
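Reviewer note: the diff preserves the asymmetry between the two loops. unmap_region() only clears PG_PRESENT, so the frame address and remaining flags survive in the entry, while vm_free() wipes the entry to zero. Condensed to the single line that differs (a sketch, using the types introduced by this commit):

/* Sketch of the per-entry difference between the two functions above. */
static void entry_semantics(page_map_t* pgt, uint16_t index_pgt)
{
	pgt->entries[index_pgt] &= ~PG_PRESENT; /* unmap_region(): non-present, contents kept */
	pgt->entries[index_pgt] = 0;            /* vm_free(): entry released entirely */
}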
@@ -538,8 +530,8 @@ int vm_free(size_t viraddr, uint32_t npages)
 static void pagefault_handler(struct state *s)
 {
 	task_t* task = per_core(current_task);
-	//page_dir_t* pgd = task->pgd;
-	//page_table_t* pgt = NULL;
+	//page_map_t* pgd = task->page_map;
+	//page_map_t* pgt = NULL;
 	size_t viraddr = read_cr2();
 	//size_t phyaddr;
 
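Reviewer note: the handler takes the faulting linear address from control register CR2, which the CPU fills on every page fault. read_cr2() is defined elsewhere in the kernel; a plausible implementation looks like this (a sketch, assuming GCC inline assembly, not taken from this commit):

/* CR2 holds the linear address that triggered the page fault. */
static inline size_t read_cr2(void)
{
	size_t cr2;
	asm volatile("mov %%cr2, %0" : "=r"(cr2));
	return cr2;
}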
@@ -62,7 +62,7 @@ extern "C" {
 #define TASK_L2 (1 << 3)
 
 typedef int (*entry_point_t)(void*);
-struct page_dir;
+typedef struct page_map page_map_t;
 
 /** @brief The task_t structure */
 typedef struct task {
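Reviewer note: replacing the bare forward declaration struct page_dir; with typedef struct page_map page_map_t; keeps the task header independent of the paging header: a pointer to an incomplete struct type is legal C, and the typedef merely gives it the name the rest of the code now uses. A minimal illustration (reduced struct, for demonstration only):

#include <stddef.h>

typedef struct page_map page_map_t;   /* incomplete type is fine here */

typedef struct mini_task {            /* hypothetical reduced task struct */
	page_map_t* page_map;         /* pointers to incomplete types are allowed */
} mini_task_t;

/* A later header (here: the paging header) completes the type. */
struct page_map {
	size_t entries[512];
};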
@@ -88,10 +88,10 @@ typedef struct task {
 	uint32_t last_core;
 	/// usage in number of pages
 	atomic_int32_t user_usage;
-	/// avoids concurrent access to the page directory
-	spinlock_irqsave_t pgd_lock;
-	/// pointer to the page directory
-	struct page_dir* pgd;
+	/// avoids concurrent access to the page map structures
+	spinlock_irqsave_t page_lock;
+	/// pointer to page directory (32bit) or page map level 4 (64bit) table respectively
+	page_map_t* page_map;
 	/// lock for the VMA_list
 	spinlock_t vma_lock;
 	/// list of VMAs
@@ -104,7 +104,7 @@ int multitasking_init(void) {
 
 	mailbox_wait_msg_init(&task_table[0].inbox);
 	memset(task_table[0].outbox, 0x00, sizeof(mailbox_wait_msg_t*)*MAX_TASKS);
-	task_table[0].pgd = get_boot_pgd();
+	task_table[0].page_map = get_boot_page_map();
 	task_table[0].flags = TASK_DEFAULT_FLAGS;
 	task_table[0].prio = IDLE_PRIO;
 	task_table[0].stack = (void*) &boot_stack;
@@ -128,7 +128,7 @@ size_t get_idle_task(uint32_t id)
 	atomic_int32_set(&task_table[id].user_usage, 0);
 	mailbox_wait_msg_init(&task_table[id].inbox);
 	memset(task_table[id].outbox, 0x00, sizeof(mailbox_wait_msg_t*)*MAX_TASKS);
-	task_table[id].pgd = get_boot_pgd();
+	task_table[id].page_map = get_boot_page_map();
 	current_task[id].var = task_table+id;
 	runqueues[id].idle = task_table+id;
 
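Reviewer note: both initialization paths hand out the boot page map, so the boot task and every per-core idle task share a single kernel address space and no page map structures are allocated for them. get_boot_page_map() presumably just exposes the statically set-up top-level table, along these lines (the variable name is assumed, not from this commit):

/* Sketch: the top-level map created at boot, before any tasks exist. */
static page_map_t boot_map __attribute__((aligned(4096)));  /* name assumed */

page_map_t* get_boot_page_map(void)
{
	return &boot_map;
}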
@@ -242,7 +242,7 @@ static void NORETURN do_exit(int arg) {
 
 	spinlock_unlock(&curr_task->vma_lock);
 
-	drop_pgd(); // delete page directory and its page tables
+	drop_page_map(); // delete page directory and its page tables
 
 #if 0
 	if (atomic_int32_read(&curr_task->user_usage))
@@ -327,7 +327,7 @@ static int create_task(tid_t* id, entry_point_t ep, void* arg, uint8_t prio, uin
 	if (task_table[i].status == TASK_INVALID) {
 		atomic_int32_set(&task_table[i].user_usage, 0);
 
-		ret = create_pgd(task_table+i, 0);
+		ret = create_page_map(task_table+i, 0);
 		if (ret < 0) {
 			ret = -ENOMEM;
 			goto create_task_out;
@@ -400,7 +400,7 @@ int sys_fork(void)
 	if (task_table[i].status == TASK_INVALID) {
 		atomic_int32_set(&task_table[i].user_usage, 0);
 
-		ret = create_pgd(task_table+i, 1);
+		ret = create_page_map(task_table+i, 1);
 		if (ret < 0) {
 			ret = -ENOMEM;
 			goto create_task_out;
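Reviewer note: the only difference between the create_task and sys_fork call sites is the copy argument: 0 requests an empty user address space for a fresh task, 1 asks create_page_map to duplicate the calling task's user mappings for the child. A sketch of that contract from the caller's side (helper name hypothetical):

/* copy == 0: fresh task, empty user map (create_task path)
 * copy == 1: duplicate current task's user mappings (sys_fork path)
 * create_page_map() returns the number of allocated page map
 * structures, or -ENOMEM on failure. */
static int setup_address_space(task_t* t, int is_fork)
{
	int ret = create_page_map(t, is_fork ? 1 : 0);

	return (ret < 0) ? -ENOMEM : 0;
}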