add build environment for 64bit code

=> the environment builds a 64bit kernel, but the kernel is not tested!
=> work in progress
Stefan Lankes 2012-05-24 10:49:45 +02:00
parent 83bd8ea677
commit 45313d47f2
25 changed files with 254 additions and 79 deletions

View file

@@ -51,8 +51,8 @@ inline static void irq_disable(void) {
  * @return The set of flags which have been set until now
  */
 inline static uint32_t irq_nested_disable(void) {
-	uint32_t flags;
-	asm volatile("pushf; cli; popl %0": "=r"(flags) : : "memory");
+	size_t flags;
+	asm volatile("pushf; cli; pop %0": "=r"(flags) : : "memory");
 	if (flags & (1 << 9))
 		return 1;
 	return 0;
@@ -83,8 +83,8 @@ inline static void irq_nested_enable(uint32_t flags) {
  */
 inline static uint32_t is_irq_enabled(void)
 {
-	uint32_t flags;
-	asm volatile("pushf; popl %0": "=r"(flags) : : "memory");
+	size_t flags;
+	asm volatile("pushf; pop %0": "=r"(flags) : : "memory");
 	if (flags & (1 << 9))
 		return 1;
 	return 0;
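
A note on the change above: `popl` hard-codes a 32-bit operand and no longer assembles in 64-bit mode, while a plain `pop` takes the natural register width, so the variable receiving the flags must be pointer-sized. A minimal standalone sketch of the same pattern (the hosted <stddef.h> stands in for the kernel's own size_t):

#include <stddef.h>

/* pushf/pop move one register-sized word: 32 bit on i386, 64 bit on
 * x86-64. With a size_t destination the same code compiles for both. */
static inline size_t read_flags(void)
{
	size_t flags;
	asm volatile("pushf; pop %0" : "=r"(flags) : : "memory");
	return flags;
}

static inline int interrupts_enabled(void)
{
	return (read_flags() & (1 << 9)) ? 1 : 0; /* IF is bit 9 of (R/E)FLAGS */
}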

View file

@@ -83,6 +83,12 @@
 #define KERN_PAGE (PG_PRESENT|PG_RW|PG_GLOBAL)
 /// This is a whole set of flags (PRESENT,RW,USER) for userspace pages
 #define USER_PAGE (PG_PRESENT|PG_RW|PG_USER)
+#if __SIZEOF_POINTER__ == 4
+#define PGT_ENTRIES 1024
+#elif __SIZEOF_POINTER__ == 8
+#define PGT_ENTRIES 512
+#endif
 /** @brief Page table structure
  *
@@ -92,7 +98,7 @@
 typedef struct page_table
 {
 	/// Page table entries are unsigned 32bit integers.
-	uint32_t entries[1024];
+	size_t entries[PGT_ENTRIES];
 } page_table_t __attribute__ ((aligned (4096)));
 /** @brief Page directory structure
@@ -103,7 +109,7 @@ typedef struct page_table
 typedef struct page_dir
 {
 	/// Page dir entries are unsigned 32bit integers.
-	uint32_t entries[1024];
+	size_t entries[PGT_ENTRIES];
 } page_dir_t __attribute__ ((aligned (4096)));
 /** @brief Converts a virtual address to a physical
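
The two PGT_ENTRIES values fall out of the page size: a table occupies exactly one 4 KiB page and each entry is one pointer-sized word, so 4096/4 = 1024 entries on i386 and 4096/8 = 512 in long mode. A compile-time check of that invariant (a sketch using C11 _Static_assert, not part of the commit):

#include <stddef.h>

#define PAGE_SIZE   4096
#if __SIZEOF_POINTER__ == 4
#define PGT_ENTRIES 1024	/* 4096 / 4 */
#else
#define PGT_ENTRIES 512		/* 4096 / 8 */
#endif

typedef struct page_table {
	size_t entries[PGT_ENTRIES];
} page_table_t __attribute__ ((aligned (4096)));

/* holds on both targets: one table fills one page exactly */
_Static_assert(sizeof(page_table_t) == PAGE_SIZE, "one page per table");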

View file

@@ -173,8 +173,8 @@ inline static uint64_t rdmsr(uint32_t msr) {
 /** @brief Read cr0 register
  * @return cr0's value
  */
-static inline uint32_t read_cr0(void) {
-	uint32_t val;
+static inline size_t read_cr0(void) {
+	size_t val;
 	asm volatile("mov %%cr0, %0" : "=r"(val));
 	return val;
 }
@@ -182,15 +182,15 @@ static inline uint32_t read_cr0(void) {
 /** @brief Write a value into cr0 register
  * @param val The value you want to write into cr0
  */
-static inline void write_cr0(uint32_t val) {
+static inline void write_cr0(size_t val) {
 	asm volatile("mov %0, %%cr0" : : "r"(val));
 }
 /** @brief Read cr2 register
  * @return cr2's value
  */
-static inline uint32_t read_cr2(void) {
-	uint32_t val;
+static inline size_t read_cr2(void) {
+	size_t val;
 	asm volatile("mov %%cr2, %0" : "=r"(val));
 	return val;
 }
@@ -198,8 +198,8 @@ static inline uint32_t read_cr2(void) {
 /** @brief Read cr3 register
  * @return cr3's value
  */
-static inline uint32_t read_cr3(void) {
-	uint32_t val;
+static inline size_t read_cr3(void) {
+	size_t val;
 	asm volatile("mov %%cr3, %0" : "=r"(val));
 	return val;
 }
@@ -207,15 +207,15 @@ static inline uint32_t read_cr3(void) {
 /** @brief Write a value into cr3 register
  * @param val The value you want to write into cr3
  */
-static inline void write_cr3(uint32_t val) {
+static inline void write_cr3(size_t val) {
 	asm volatile("mov %0, %%cr3" : : "r"(val));
 }
 /** @brief Read cr4 register
  * @return cr4's value
  */
-static inline uint32_t read_cr4(void) {
-	uint32_t val;
+static inline size_t read_cr4(void) {
+	size_t val;
 	asm volatile("mov %%cr4, %0" : "=r"(val));
 	return val;
 }
@@ -223,7 +223,7 @@ static inline uint32_t read_cr4(void) {
 /** @brief Write a value into cr4 register
  * @param val The value you want to write into cr4
  */
-static inline void write_cr4(uint32_t val) {
+static inline void write_cr4(size_t val) {
 	asm volatile("mov %0, %%cr4" : : "r"(val));
 }
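
The control-register accessors switch to size_t because `mov` to or from cr0..cr4 always moves the full native register, and cr3 carries a physical address that exceeds 32 bits in long mode. With pointer-sized accessors, code like a TLB flush by cr3 reload is identical on both targets (a sketch built on the functions above):

/* Reloading cr3 invalidates all non-global TLB entries; the same two
 * lines work on i386 and x86-64 once the accessors take size_t. */
static inline void tlb_flush(void)
{
	size_t cr3 = read_cr3();	/* physical base of the paging root */

	if (cr3)
		write_cr3(cr3);
}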

View file

@@ -32,6 +32,8 @@
 extern "C" {
 #endif
+#if __SIZEOF_POINTER__ == 4
+#define CONFIG_X86_32
 /// A popular type for addresses
 typedef unsigned long size_t;
 /// Pointer differences
@@ -40,6 +42,19 @@ typedef long ptrdiff_t;
 typedef long ssize_t;
 typedef long off_t;
 #endif
+#elif __SIZEOF_POINTER__ == 8
+#define CONFIG_X86_64
+/// A popular type for addresses
+typedef unsigned long long size_t;
+/// Pointer differences
+typedef long long ptrdiff_t;
+#ifdef __KERNEL__
+typedef long long ssize_t;
+typedef long long off_t;
+#endif
+#else
+#error unsupported architecture
+#endif
 /// Unsigned 64 bit integer
 typedef unsigned long long uint64_t;
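
With this change the header derives the configuration (CONFIG_X86_32 vs CONFIG_X86_64) and all base types from the compiler's __SIZEOF_POINTER__, so no hand-maintained configuration switch is needed. A hypothetical self-check, not part of the commit, that the selected size_t really matches the pointer width:

#if __SIZEOF_POINTER__ == 8
_Static_assert(sizeof(unsigned long long) == sizeof(void*), "size_t spans 64 bits");
#else
_Static_assert(sizeof(unsigned long) == sizeof(void*), "size_t spans 32 bits");
#endif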

View file

@@ -71,7 +71,7 @@ void switch_context(size_t** stack);
  * - 0 on success
  * - -EINVAL (-22) on failure
  */
-int create_default_frame(task_t* task, internal_entry_point_t ep, void* arg);
+int create_default_frame(task_t* task, entry_point_t ep, void* arg);
 /** @brief Register a task's TSS at GDT
  *
@@ -91,9 +91,13 @@ int register_task(task_t* task);
  */
 static inline int jump_to_user_code(uint32_t ep, uint32_t stack)
 {
+#ifdef CONFIG_X86_32
 	asm volatile ("mov %0, %%ds; mov %0, %%fs; mov %0, %%gs; mov %0, %%es" :: "r"(0x23));
 	asm volatile ("push $0x23; push %0; push $0x1B; push %1" :: "r"(stack), "r"(ep));
 	asm volatile ("lret" ::: "cc");
+#else
+#warning Currently, not supported!
+#endif
 	return 0;
 }
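
The 32-bit path builds an lret frame by hand: SS (0x23) and the user stack, then CS (0x1B) and the entry point. In long mode the equivalent would be an iretq frame of five quadwords (RIP, CS, RFLAGS, RSP, SS), which is why the 64-bit branch is still a #warning. A heavily hedged sketch of what that branch could look like; the 0x1B/0x23 selectors are carried over from the 32-bit GDT layout and are an assumption here:

static inline int jump_to_user_code64(size_t ep, size_t stack)
{
	asm volatile ("push $0x23\n\t"	/* SS: user data selector (assumed) */
		      "push %0\n\t"	/* RSP: user stack                  */
		      "pushfq\n\t"	/* RFLAGS                           */
		      "push $0x1B\n\t"	/* CS: user code selector (assumed) */
		      "push %1\n\t"	/* RIP: entry point                 */
		      "iretq" :: "r"(stack), "r"(ep));
	return 0;
}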

View file

@@ -1,5 +1,5 @@
 C_source := gdt.c kb.c timer.c irq.c isrs.c idt.c vga.c multiboot.c apic.c pci.c processor.c
-ASM_source := entry.asm string.asm
+ASM_source := entry.asm string$(BIT).asm
 MODULE := arch_x86_kernel
 include $(TOPDIR)/Makefile.inc

View file

@@ -29,7 +29,7 @@
 gdt_ptr_t gp;
 tss_t task_state_segments[MAX_CORES] __attribute__ ((aligned (PAGE_SIZE)));
 static unsigned char kstacks[MAX_TASKS][KERNEL_STACK_SIZE] __attribute__ ((aligned (PAGE_SIZE))) = {[0 ... MAX_TASKS-1][0 ... KERNEL_STACK_SIZE-1] = 0xCD};
-uint32_t default_stack_pointer = (uint32_t) kstacks[0] + KERNEL_STACK_SIZE - sizeof(size_t);
+size_t default_stack_pointer = (size_t) kstacks[0] + KERNEL_STACK_SIZE - sizeof(size_t);
 // currently, our kernel has full access to the ioports
 static gdt_entry_t gdt[GDT_ENTRIES] = {[0 ... GDT_ENTRIES-1] = {0, 0, 0, 0, 0, 0}};
@@ -84,6 +84,7 @@ int arch_fork(task_t* task)
 	// copy kernel stack of the current task
 	memcpy(kstacks[id], kstacks[curr_task->id], KERNEL_STACK_SIZE);
+#ifdef CONFIG_X86_32
 	asm volatile ("mov %%esp, %0" : "=r"(esp));
 	esp -= (uint32_t) kstacks[curr_task->id];
 	esp += (uint32_t) kstacks[id];
@@ -115,11 +116,15 @@ int arch_fork(task_t* task)
 	state->eflags |= (1 << 9);
 	// This will be the entry point for the new task.
 	asm volatile ("call read_eip" : "=a"(state->eip));
+#else
+#warning Currently, not supported!
+	return -1;
+#endif
 	return 0;
 }
-int create_default_frame(task_t* task, internal_entry_point_t ep, void* arg)
+int create_default_frame(task_t* task, entry_point_t ep, void* arg)
 {
 	uint16_t cs = 0x08;
 	uint32_t id;
@@ -132,6 +137,7 @@ int create_default_frame(task_t* task, internal_entry_point_t ep, void* arg)
 	memset(kstacks[id], 0xCD, KERNEL_STACK_SIZE);
+#ifdef CONFIG_X86_32
 	/* The difference between setting up a task for SW-task-switching
 	 * and not for HW-task-switching is setting up a stack and not a TSS.
 	 * This is the stack which will be activated and popped off for iret later.
@@ -169,6 +175,10 @@ int create_default_frame(task_t* task, internal_entry_point_t ep, void* arg)
 	/* Set the task's stack pointer entry to the stack we have crafted right now. */
 	task->stack = (size_t*)stack;
+#else
+#warning Currently, not supported
+	return -1;
+#endif
 	return 0;
 }
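
Both arch_fork and create_default_frame are now fenced with CONFIG_X86_32 because they craft an i386 interrupt frame on the new kernel stack and consume it with popa/iret; a 64-bit port needs a different frame (64-bit registers, iretq). Roughly the layout the 32-bit code builds, as an illustration only (field names are hypothetical, mirroring a typical struct state):

#include <stdint.h>

/* what the first context switch will pop off the crafted stack */
struct initial_frame32 {
	uint32_t gs, fs, es, ds;		/* segment registers   */
	uint32_t edi, esi, ebp, esp_dummy;	/* popa block ...      */
	uint32_t ebx, edx, ecx, eax;
	uint32_t int_no, error_code;		/* fake exception info */
	uint32_t eip, cs, eflags;		/* consumed by iret    */
};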

View file

@@ -0,0 +1,99 @@
+;
+; Written by the Chair for Operating Systems, RWTH Aachen University
+;
+; NO Copyright (C) 2010-2012, Stefan Lankes
+; consider these trivial functions to be public domain.
+;
+; These functions are distributed on an "AS IS" BASIS,
+; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;
+
+; TODO
+%if 0
+[BITS 32]
+SECTION .text
+
+global strcpy
+strcpy:
+	push ebp
+	mov ebp, esp
+	push edi
+	push esi
+	mov esi, [ebp+12]
+	mov edi, [ebp+8]
+L1:
+	lodsb
+	stosb
+	test al, al
+	jne L1
+	mov eax, [ebp+8]
+	pop esi
+	pop edi
+	pop ebp
+	ret
+
+global strncpy
+strncpy:
+	push ebp
+	mov ebp, esp
+	push edi
+	push esi
+	mov ecx, [ebp+16]
+	mov esi, [ebp+12]
+	mov edi, [ebp+8]
+L2:
+	dec ecx
+	js L3
+	lodsb
+	stosb
+	test al, al
+	jne L2
+	rep
+	stosb
+L3:
+	mov eax, [ebp+8]
+	pop esi
+	pop edi
+	pop ebp
+	ret
+
+; The following function is derived from JamesM's kernel development tutorials
+; (http://www.jamesmolloy.co.uk/tutorial_html/)
+global copy_page_physical
+copy_page_physical:
+	push esi		; According to __cdecl, we must preserve the contents of ESI
+	push edi		; and EDI.
+	pushf			; push EFLAGS, so we can pop it and reenable interrupts
+				; later, if they were enabled anyway.
+	cli			; Disable interrupts, so we aren't interrupted.
+				; Load these in BEFORE we disable paging!
+	mov edi, [esp+12+4]	; Destination address
+	mov esi, [esp+12+8]	; Source address
+	mov edx, cr0		; Get the control register...
+	and edx, 0x7fffffff	; and...
+	mov cr0, edx		; Disable paging.
+	cld
+	mov ecx, 0x400		; 1024*4bytes = 4096 bytes = page size
+	rep movsd		; copy page
+	mov edx, cr0		; Get the control register again
+	or edx, 0x80000000	; and...
+	mov cr0, edx		; Enable paging.
+	popf			; Pop EFLAGS back.
+	pop edi			; Get the original value of EDI
+	pop esi			; and ESI back.
+	ret
+%endif
+
+SECTION .note.GNU-stack noalloc noexec nowrite progbits
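
The whole file is parked behind `%if 0`: the 32-bit copy_page_physical trick of clearing CR0.PG cannot be ported, because long mode requires paging to stay enabled. A plausible 64-bit replacement, sketched here as an assumption against the kernel's own mapping primitives visible elsewhere in this commit (vm_alloc, map_region, unmap_region), would copy through temporary kernel mappings instead:

static int copy_page_physical64(size_t dest_phys, size_t src_phys)
{
	/* map both frames somewhere in kernel space ... */
	size_t dest = vm_alloc(1, MAP_KERNEL_SPACE);
	size_t src  = vm_alloc(1, MAP_KERNEL_SPACE);

	if (BUILTIN_EXPECT(!dest || !src, 0))
		return -ENOMEM;
	map_region(dest, dest_phys, 1, MAP_KERNEL_SPACE|MAP_REMAP);
	map_region(src,  src_phys,  1, MAP_KERNEL_SPACE|MAP_REMAP);

	/* ... copy with plain memcpy instead of toggling CR0.PG ... */
	memcpy((void*) dest, (void*) src, PAGE_SIZE);

	/* ... and drop the temporary mappings again */
	unmap_region(dest, 1);
	unmap_region(src, 1);
	return 0;
}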

View file

@@ -57,7 +57,7 @@ extern const void kernel_start;
 extern const void kernel_end;
 // boot task's page directory and page directory lock
-static page_dir_t boot_pgd = {{[0 ... 1023] = 0}};
+static page_dir_t boot_pgd = {{[0 ... PGT_ENTRIES-1] = 0}};
 static spinlock_t kslock = SPINLOCK_INIT;
 static int paging_enabled = 0;
@@ -78,6 +78,7 @@ inline static size_t copy_page_table(task_t* task, uint32_t pgd_index, page_tabl
 	page_table_t* new_pgt;
 	size_t phyaddr;
+#ifdef CONFIG_X86_32
 	if (BUILTIN_EXPECT(!pgt, 0))
 		return 0;
@@ -89,7 +90,7 @@ inline static size_t copy_page_table(task_t* task, uint32_t pgd_index, page_tabl
 		(*counter)++;
 	for(i=0; i<1024; i++) {
-		if (pgt->entries[i] & 0xFFFFF000) {
+		if (pgt->entries[i] & PAGE_MASK) {
 			if (!(pgt->entries[i] & PG_USER)) {
 				// Kernel page => copy only page entries
 				new_pgt->entries[i] = pgt->entries[i];
@@ -102,7 +103,7 @@ inline static size_t copy_page_table(task_t* task, uint32_t pgd_index, page_tabl
 			if (counter)
 				(*counter)++;
-			copy_page_physical((void*)phyaddr, (void*) (pgt->entries[i] & 0xFFFFF000));
+			copy_page_physical((void*)phyaddr, (void*) (pgt->entries[i] & PAGE_MASK));
 			new_pgt->entries[i] = phyaddr | (pgt->entries[i] & 0xFFF);
@@ -113,6 +114,10 @@ inline static size_t copy_page_table(task_t* task, uint32_t pgd_index, page_tabl
 	phyaddr = virt_to_phys((size_t)new_pgt);
 	return phyaddr;
+#else
+#warning Currently, not supported
+	return 0;
+#endif
 }
 int create_pgd(task_t* task, int copy)
@@ -131,7 +136,7 @@ int create_pgd(task_t* task, int copy)
 	// we already know the virtual address of the "page table container"
 	// (see file header)
-	pgt_container = (page_table_t*) ((KERNEL_SPACE - PAGE_SIZE) & 0xFFFFF000);
+	pgt_container = (page_table_t*) ((KERNEL_SPACE - PAGE_SIZE) & PAGE_MASK);
 	// create new page directory for the new task
 	pgd = kmalloc(sizeof(page_dir_t));
@@ -159,13 +164,13 @@ int create_pgd(task_t* task, int copy)
 	spinlock_unlock(&kslock);
 	// map page table container at the end of the kernel space
-	viraddr = (KERNEL_SPACE - PAGE_SIZE) & 0xFFFFF000;
+	viraddr = (KERNEL_SPACE - PAGE_SIZE) & PAGE_MASK;
 	index1 = viraddr >> 22;
 	index2 = (viraddr >> 12) & 0x3FF;
 	// now, we create a self reference
-	pgd->entries[index1] = ((size_t) virt_to_phys((size_t) pgt) & 0xFFFFF000)|KERN_TABLE;
-	pgt->entries[index2] = ((size_t) virt_to_phys((size_t) pgt) & 0xFFFFF000)|KERN_PAGE;
+	pgd->entries[index1] = ((size_t) virt_to_phys((size_t) pgt) & PAGE_MASK)|KERN_TABLE;
+	pgt->entries[index2] = ((size_t) virt_to_phys((size_t) pgt) & PAGE_MASK)|KERN_PAGE;
 	task->pgd = pgd;
@@ -178,10 +183,10 @@ int create_pgd(task_t* task, int copy)
 		if (!(curr_task->pgd->entries[i] & PG_USER))
 			continue;
-		phyaddr = copy_page_table(task, i, (page_table_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + i*PAGE_SIZE) & 0xFFFFF000), &counter);
+		phyaddr = copy_page_table(task, i, (page_table_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + i*PAGE_SIZE) & PAGE_MASK), &counter);
 		if (phyaddr) {
-			pgd->entries[i] = (phyaddr & 0xFFFFF000) | (curr_task->pgd->entries[i] & 0xFFF);
-			pgt->entries[i] = (phyaddr & 0xFFFFF000) | KERN_PAGE;
+			pgd->entries[i] = (phyaddr & PAGE_MASK) | (curr_task->pgd->entries[i] & 0xFFF);
+			pgt->entries[i] = (phyaddr & PAGE_MASK) | KERN_PAGE;
 		}
 	}
@@ -208,7 +213,7 @@ int drop_pgd(void)
 	for(i=0; i<1024; i++) {
 		if (pgd->entries[i] & PG_USER) {
-			put_page(pgd->entries[i] & 0xFFFFF000);
+			put_page(pgd->entries[i] & PAGE_MASK);
 			pgd->entries[i] = 0;
 		}
 	}
@@ -241,14 +246,14 @@ size_t virt_to_phys(size_t viraddr)
 	index1 = viraddr >> 22;
 	index2 = (viraddr >> 12) & 0x3FF;
-	if (!(task->pgd->entries[index1] & 0xFFFFF000))
+	if (!(task->pgd->entries[index1] & PAGE_MASK))
 		goto out;
-	pgt = (page_table_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE) & 0xFFFFF000);
+	pgt = (page_table_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE) & PAGE_MASK);
 	if (!pgt || !(pgt->entries[index2]))
 		goto out;
-	ret = pgt->entries[index2] & 0xFFFFF000; // determine page frame
+	ret = pgt->entries[index2] & PAGE_MASK; // determine page frame
 	ret = ret | (viraddr & 0xFFF); // add page offset
 out:
 	//kprintf("vir %p to phy %p\n", viraddr, ret);
@@ -305,17 +310,17 @@ size_t map_region(size_t viraddr, size_t phyaddr, uint32_t npages, uint32_t flag
 		// set the new page table into the directory
 		if (flags & MAP_USER_SPACE)
-			task->pgd->entries[index] = (uint32_t)pgt|USER_TABLE;
+			task->pgd->entries[index] = (size_t)pgt|USER_TABLE;
 		else
-			task->pgd->entries[index] = (uint32_t)pgt|KERN_TABLE;
+			task->pgd->entries[index] = (size_t)pgt|KERN_TABLE;
 		// if paging is already enabled, we need to use the virtual address
 		if (paging_enabled)
 			// we already know the virtual address of the "page table container"
 			// (see file header)
-			pgt_container = (page_table_t*) ((KERNEL_SPACE - PAGE_SIZE) & 0xFFFFF000);
+			pgt_container = (page_table_t*) ((KERNEL_SPACE - PAGE_SIZE) & PAGE_MASK);
 		else
-			pgt_container = (page_table_t*) (task->pgd->entries[(KERNEL_SPACE - PAGE_SIZE) >> 22] & 0xFFFFF000);
+			pgt_container = (page_table_t*) (task->pgd->entries[(KERNEL_SPACE - PAGE_SIZE) >> 22] & PAGE_MASK);
 		if (BUILTIN_EXPECT(!pgt_container, 0)) {
 			spinlock_unlock(pgd_lock);
@@ -328,14 +333,14 @@ size_t map_region(size_t viraddr, size_t phyaddr, uint32_t npages, uint32_t flag
 		// clear the page table
 		if (paging_enabled)
-			memset((void*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index*PAGE_SIZE) & 0xFFFFF000), 0x00, PAGE_SIZE);
+			memset((void*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index*PAGE_SIZE) & PAGE_MASK), 0x00, PAGE_SIZE);
 		else
 			memset(pgt, 0x00, PAGE_SIZE);
-	} else pgt = (page_table_t*) (task->pgd->entries[index] & 0xFFFFF000);
+	} else pgt = (page_table_t*) (task->pgd->entries[index] & PAGE_MASK);
 	/* convert physical address to virtual */
 	if (paging_enabled)
-		pgt = (page_table_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index*PAGE_SIZE) & 0xFFFFF000);
+		pgt = (page_table_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index*PAGE_SIZE) & PAGE_MASK);
 	index = (viraddr >> 12) & 0x3FF;
 	if (pgt->entries[index] && !(flags & MAP_REMAP)) {
@@ -345,9 +350,9 @@ size_t map_region(size_t viraddr, size_t phyaddr, uint32_t npages, uint32_t flag
 	}
 	if (flags & MAP_USER_SPACE)
-		pgt->entries[index] = USER_PAGE|(phyaddr & 0xFFFFF000);
+		pgt->entries[index] = USER_PAGE|(phyaddr & PAGE_MASK);
 	else
-		pgt->entries[index] = KERN_PAGE|(phyaddr & 0xFFFFF000);
+		pgt->entries[index] = KERN_PAGE|(phyaddr & PAGE_MASK);
 	if (flags & MAP_NO_CACHE)
 		pgt->entries[index] |= PG_PCD;
@@ -387,7 +392,7 @@ size_t map_region(size_t viraddr, size_t phyaddr, uint32_t npages, uint32_t flag
 int change_page_permissions(size_t start, size_t end, uint32_t flags)
 {
 	uint32_t index1, index2, newflags;
-	size_t viraddr = start & 0xFFFFF000;
+	size_t viraddr = start & PAGE_MASK;
 	size_t phyaddr;
 	page_table_t* pgt;
 	page_dir_t* pgd;
@@ -408,9 +413,9 @@ int change_page_permissions(size_t start, size_t end, uint32_t flags)
 		index2 = (viraddr >> 12) & 0x3FF;
 		while ((viraddr < end) && (index2 < 1024)) {
-			pgt = (page_table_t*) (page_table_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE) & 0xFFFFF000);
+			pgt = (page_table_t*) (page_table_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE) & PAGE_MASK);
 			if (pgt && pgt->entries[index2]) {
-				phyaddr = pgt->entries[index2] & 0xFFFFF000;
+				phyaddr = pgt->entries[index2] & PAGE_MASK;
 				newflags = pgt->entries[index2] & 0xFFF; // get old flags
 				if (!(newflags & PG_SVM_INIT)) {
@@ -435,7 +440,7 @@ int change_page_permissions(size_t start, size_t end, uint32_t flags)
 #endif
 				}
-				pgt->entries[index2] = (newflags & 0xFFF) | (phyaddr & 0xFFFFF000);
+				pgt->entries[index2] = (newflags & 0xFFF) | (phyaddr & PAGE_MASK);
 				tlb_flush_one_page(viraddr);
 			}
@@ -469,12 +474,12 @@ size_t vm_alloc(uint32_t npages, uint32_t flags)
 	if (flags & MAP_KERNEL_SPACE) {
 		pgd_lock = &kslock;
-		start = (((size_t) &kernel_end) + PAGE_SIZE) & 0xFFFFF000;
-		end = (KERNEL_SPACE - 2*PAGE_SIZE) & 0xFFFFF000; // we need 1 PAGE for our PGTs
+		start = (((size_t) &kernel_end) + PAGE_SIZE) & PAGE_MASK;
+		end = (KERNEL_SPACE - 2*PAGE_SIZE) & PAGE_MASK; // we need 1 PAGE for our PGTs
 	} else {
 		pgd_lock = &task->pgd_lock;
-		start = KERNEL_SPACE & 0xFFFFF000;
-		end = 0xFFFFF000;
+		start = KERNEL_SPACE & PAGE_MASK;
+		end = PAGE_MASK;
 	}
 	if (BUILTIN_EXPECT(!npages, 0))
@@ -488,7 +493,7 @@ size_t vm_alloc(uint32_t npages, uint32_t flags)
 		index1 = i >> 22;
 		index2 = (i >> 12) & 0x3FF;
-		pgt = (page_table_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE) & 0xFFFFF000);
+		pgt = (page_table_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE) & PAGE_MASK);
 		if (!pgt || !(pgt->entries[index2])) {
 			i+=PAGE_SIZE;
 			j++;
@@ -531,7 +536,7 @@ int unmap_region(size_t viraddr, uint32_t npages)
 		index1 = viraddr >> 22;
 		index2 = (viraddr >> 12) & 0x3FF;
-		pgt = (page_table_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE) & 0xFFFFF000);
+		pgt = (page_table_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE) & PAGE_MASK);
 		if (!pgt)
 			continue;
 		pgt->entries[index2] &= ~PG_PRESENT;
@@ -570,7 +575,7 @@ int vm_free(size_t viraddr, uint32_t npages)
 		index1 = viraddr >> 22;
 		index2 = (viraddr >> 12) & 0x3FF;
-		pgt = (page_table_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE) & 0xFFFFF000);
+		pgt = (page_table_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE) & PAGE_MASK);
 		if (!pgt)
 			continue;
 		pgt->entries[index2] = 0;
@@ -603,7 +608,7 @@ int print_paging_tree(size_t viraddr)
 	kprintf("\tPage directory entry %u: ", index1);
 	if (pgd) {
 		kprintf("0x%0x\n", pgd->entries[index1]);
-		pgt = (page_table_t*) (pgd->entries[index1] & 0xFFFFF000);
+		pgt = (page_table_t*) (pgd->entries[index1] & PAGE_MASK);
 	} else
 		kputs("invalid page directory\n");
@@ -634,7 +639,7 @@ static void pagefault_handler(struct state *s)
 #endif
 	if ((viraddr >= task->start_heap) && (viraddr <= task->end_heap) && (viraddr > KERNEL_SPACE)) {
-		viraddr = viraddr & 0xFFFFF000;
+		viraddr = viraddr & PAGE_MASK;
 		phyaddr = get_page();
 		if (BUILTIN_EXPECT(!phyaddr, 0))
@@ -653,9 +658,9 @@ static void pagefault_handler(struct state *s)
 	// does our SVM system need to handle this page fault?
 	index1 = viraddr >> 22;
 	index2 = (viraddr >> 12) & 0x3FF;
-	if (!pgd || !(pgd->entries[index1] & 0xFFFFF000))
+	if (!pgd || !(pgd->entries[index1] & PAGE_MASK))
 		goto default_handler;
-	pgt = (page_table_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE) & 0xFFFFF000);
+	pgt = (page_table_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE) & PAGE_MASK);
 	if (!pgt || !(pgt->entries[index2]))
 		goto default_handler;
 	if (pgt->entries[index2] & PG_SVM_INIT) {
@@ -702,8 +707,8 @@ int arch_paging_init(void)
 	index2 = (viraddr >> 12) & 0x3FF;
 	// now, we create a self reference
-	per_core(current_task)->pgd->entries[index1] = (((size_t) pgt) & 0xFFFFF000)|KERN_TABLE;
-	pgt->entries[index2] = ((size_t) pgt & 0xFFFFF000)|KERN_PAGE;
+	per_core(current_task)->pgd->entries[index1] = (((size_t) pgt) & PAGE_MASK)|KERN_TABLE;
+	pgt->entries[index2] = ((size_t) pgt & PAGE_MASK)|KERN_PAGE;
 	// create the other PGTs for the kernel space
 	for(i=0; i<KERNEL_SPACE/(1024*PAGE_SIZE)-1; i++) {
@@ -715,8 +720,8 @@ int arch_paging_init(void)
 		}
 		memset((void*) phyaddr, 0, PAGE_SIZE);
-		per_core(current_task)->pgd->entries[i] = (phyaddr & 0xFFFFF000)|KERN_TABLE;
-		pgt->entries[i] = (phyaddr & 0xFFFFF000)|KERN_PAGE;
+		per_core(current_task)->pgd->entries[i] = (phyaddr & PAGE_MASK)|KERN_TABLE;
+		pgt->entries[i] = (phyaddr & PAGE_MASK)|KERN_PAGE;
 	}
 	/*
@@ -746,7 +751,7 @@ int arch_paging_init(void)
 	 * of course, mb_info has to map into the kernel space
 	 */
 	if (mb_info)
-		map_region((size_t) mb_info & 0xFFFFF000, (size_t) mb_info & 0xFFFFF000, 1, MAP_KERNEL_SPACE);
+		map_region((size_t) mb_info & PAGE_MASK, (size_t) mb_info & PAGE_MASK, 1, MAP_KERNEL_SPACE);
 #if 0
 	/*
@@ -773,7 +778,7 @@ int arch_paging_init(void)
 	 * Therefore, we map these modules into the kernel space.
 	 */
 	if (mb_info && (mb_info->flags & MULTIBOOT_INFO_MODS)) {
-		multiboot_module_t* mmodule = (multiboot_module_t*) mb_info->mods_addr;
+		multiboot_module_t* mmodule = (multiboot_module_t*) ((size_t) mb_info->mods_addr);
 		npages = mb_info->mods_count * sizeof(multiboot_module_t) >> PAGE_SHIFT;
 		if (mb_info->mods_count * sizeof(multiboot_module_t) & (PAGE_SIZE-1))
@@ -809,7 +814,7 @@ int arch_paging_init(void)
 #endif
 	/* enable paging */
-	write_cr3((uint32_t) &boot_pgd);
+	write_cr3((size_t) &boot_pgd);
 	i = read_cr0();
 	i = i | (1 << 31);
 	write_cr0(i);
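
Most of this file is one mechanical substitution: the literal 0xFFFFF000 strips the page offset correctly only for 32-bit addresses; as an unsigned 32-bit constant it zero-extends on x86-64 and silently discards the upper half of the address, whereas PAGE_MASK (~(PAGE_SIZE - 1)) widens with its operand. A small hosted demonstration of the difference (an illustration, not kernel code):

#include <stdio.h>
#include <stddef.h>

int main(void)
{
	size_t addr = (size_t) 0x123456789ABCull;	/* a 64-bit address */
	size_t page_mask = ~((size_t) 4096 - 1);

	printf("%zx\n", addr & 0xFFFFF000);	/* 56789000 - upper bits lost */
	printf("%zx\n", addr & page_mask);	/* 123456789000 - correct     */
	return 0;
}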

View file

@@ -203,7 +203,7 @@ int svm_alloc_page(size_t addr, page_table_t* pgt)
 	t_vcharp mpb = (t_vcharp) ((size_t)(virt_to_phys(addr) >> PAGE_SHIFT) | ((size_t) RCCE_comm_buffer[RCCE_IAM] - RCCE_LINE_SIZE));
 	uint16_t offset = 0xFFFF;
-	addr &= 0xFFFFF000; // align address to the page boundary
+	addr &= PAGE_MASK; // align address to the page boundary
 	RCCE_acquire_lock(RC_COREID[LOCK_ID]);
@@ -318,7 +318,7 @@ int svm_access_request(size_t addr)
 	}
 #endif
-	addr &= 0xFFFFF000; // align address to page boundary
+	addr &= PAGE_MASK; // align address to page boundary
 	ret = change_page_permissions(addr, addr + PAGE_SIZE, VMA_READ|VMA_WRITE|VMA_CACHEABLE);
 #if USE_PERFCOUNTERS
@@ -634,10 +634,10 @@ void svm_flush(size_t phyaddr)
 	index2 = (viraddr >> 12) & 0x3FF;
 	/* check if pgt is present */
-	if (!pgd || !(pgd->entries[index1] & 0xFFFFF000))
+	if (!pgd || !(pgd->entries[index1] & PAGE_MASK))
 		goto wrong_addr;
-	pgt = (page_table_t*)((KERNEL_SPACE - 1024 * PAGE_SIZE + index1 * PAGE_SIZE) & 0xFFFFF000);
+	pgt = (page_table_t*)((KERNEL_SPACE - 1024 * PAGE_SIZE + index1 * PAGE_SIZE) & PAGE_MASK);
 	if( pgt->entries[index2] & PG_MPE ) {
 		goto flush_l1;
View file

@@ -400,7 +400,7 @@ int initrd_init(void)
 	multiboot_module_t* mmodule = NULL;
 	if (mb_info && (mb_info->flags & MULTIBOOT_INFO_MODS)) {
-		mmodule = (multiboot_module_t*) mb_info->mods_addr;
+		mmodule = (multiboot_module_t*) ((size_t) mb_info->mods_addr);
 		mods_count = mb_info->mods_count;
 	}
 #endif
@@ -448,7 +448,7 @@ int initrd_init(void)
 	/* For every module.. */
 #ifdef CONFIG_MULTIBOOT
 	for(i=0; i<mods_count; i++) {
-		initrd_header_t* header = (initrd_header_t*) mmodule[i].mod_start;
+		initrd_header_t* header = (initrd_header_t*) ((size_t) mmodule[i].mod_start);
 #elif defined(CONFIG_ROCKCREEK)
 	for(i=0; i<1; i++) {
 		initrd_header_t* header = (initrd_header_t*) bootinfo->addr;
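
The double cast is about warning-clean builds: multiboot structure fields such as mods_addr and mod_start are fixed 32-bit integers, and casting a 32-bit integer directly to a 64-bit pointer makes GCC emit -Wint-to-pointer-cast. Widening through size_t first keeps both targets quiet. A minimal sketch of the idiom (the struct is trimmed to the fields used here):

#include <stdint.h>
#include <stddef.h>

typedef struct {
	uint32_t mod_start;	/* physical addresses, always 32 bit */
	uint32_t mod_end;
} multiboot_module_t;

static multiboot_module_t* module_ptr(uint32_t mods_addr)
{
	/* 32-bit integer -> pointer-sized integer -> pointer */
	return (multiboot_module_t*) ((size_t) mods_addr);
}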

View file

@@ -31,7 +31,6 @@ extern "C" {
 #define DEFAULT_STACK_SIZE (32*1024)
 #define KERNEL_STACK_SIZE 8192
 #define KMSG_SIZE (128*1024)
-#define PAGE_SIZE 4096
 #define PAGE_SHIFT 12
 #define CACHE_LINE 64
 #define MAILBOX_SIZE 8

View file

@@ -32,6 +32,10 @@ extern "C" {
 typedef unsigned int tid_t;
+#define PAGE_SIZE (1 << PAGE_SHIFT)
+#define PAGE_MASK ~(PAGE_SIZE - 1)
+#define PAGE_ALIGN(addr) (((addr) + PAGE_SIZE - 1) & PAGE_MASK)
 #if MAX_CORES == 1
 #define per_core(name) name
 #define DECLARE_PER_CORE(type, name) extern type name;
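
PAGE_SIZE is now derived from PAGE_SHIFT in one architecture-neutral place (replacing the fixed definition removed from the config header above), and PAGE_ALIGN rounds up to the next page boundary. A quick hosted check of the arithmetic, not part of the commit:

#include <assert.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE (1 << PAGE_SHIFT)
#define PAGE_MASK ~(PAGE_SIZE - 1)
#define PAGE_ALIGN(addr) (((addr) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
	assert(PAGE_ALIGN(0x1234) == 0x2000);	/* rounds up        */
	assert(PAGE_ALIGN(0x2000) == 0x2000);	/* already aligned  */
	assert((0x2FFF & PAGE_MASK) == 0x2000);	/* mask rounds down */
	return 0;
}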

View file

@@ -62,7 +62,6 @@ extern "C" {
 #define TASK_L2 (1 << 3)
 typedef int (*entry_point_t)(void*);
-typedef int (STDCALL *internal_entry_point_t)(void*);
 struct page_dir;
 /** @brief The task_t structure */

View file

@@ -19,6 +19,7 @@
 #include <metalsvm/stddef.h>
 #include <metalsvm/stdio.h>
 #include <metalsvm/stdlib.h>
+#include <metalsvm/string.h>
 #include <metalsvm/processor.h>
 #include <metalsvm/time.h>

View file

@@ -245,7 +245,7 @@ void NORETURN abort(void) {
  * - 0 on success
  * - -ENOMEM (-12) or -EINVAL (-22) on failure
  */
-static int create_task(tid_t* id, internal_entry_point_t ep, void* arg, uint8_t prio)
+static int create_task(tid_t* id, entry_point_t ep, void* arg, uint8_t prio)
 {
 	task_t* curr_task;
 	int ret = -ENOMEM;
@@ -440,7 +440,7 @@ typedef struct {
 /** @brief This call is used to adapt create_task calls
  * which want to have a start function and argument list */
-static int STDCALL kernel_entry(void* args)
+static int kernel_entry(void* args)
 {
 	int ret;
 	kernel_args_t* kernel_args = (kernel_args_t*) args;
@@ -494,6 +494,7 @@ typedef struct {
  */
 static int load_task(load_args_t* largs)
 {
+#ifdef CONFIG_X86_32
 	uint32_t i, offset, idx, fd_i;
 	uint32_t addr, npages, flags, stack = 0;
 	elf_header_t header;
@@ -717,11 +718,15 @@ invalid:
 	kprintf("program entry point 0x%x\n", (size_t) header.entry);
 	return -EINVAL;
+#else
+#warning Currently, not supported!
+	return -EINVAL;
+#endif
 }
 /** @brief This call is used to adapt create_task calls
  * which want to have a start function and argument list */
-static int STDCALL user_entry(void* arg)
+static int user_entry(void* arg)
 {
 	int ret;
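
Dropping internal_entry_point_t removes the last STDCALL (callee-cleans-stack) entry points: that attribute only has meaning in the 32-bit stack-based calling convention, while the System V x86-64 ABI passes arguments in registers. All task entries now share the plain entry_point_t shape; a trivial illustration:

typedef int (*entry_point_t)(void*);

/* hypothetical task body - same declaration on i386 and x86-64 */
static int hello_task(void* arg)
{
	(void) arg;
	return 0;
	/* e.g. started via create_task(&id, hello_task, NULL, prio); */
}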

View file

@@ -37,6 +37,8 @@
  * have been added in quad.h.
  */
+#if __SIZEOF_POINTER__ == 4
 #include "quad.h"
 /*
@@ -61,3 +63,5 @@ __divdi3(a, b)
 	uq = __qdivrem(ua, ub, (u_quad_t *)0);
 	return (neg ? -uq : uq);
 }
+#endif
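
The same `#if __SIZEOF_POINTER__ == 4` guard is added to all seven FreeBSD-derived helpers in this commit (divdi3, lshrdi3, moddi3, qdivrem, ucmpdi2, udivdi3, umoddi3): on i386, GCC lowers 64-bit division and modulo to libcalls such as __divdi3 and __udivdi3, while on x86-64 a single wide div instruction suffices and the helpers would only be dead code. What triggers the libcall, as a sketch:

#include <stdint.h>

uint64_t ticks_to_msec(uint64_t ticks, uint32_t hz)
{
	/* on i386 this division compiles to a call to __udivdi3;
	 * on x86-64 it becomes a single divq instruction */
	return (ticks * 1000) / hz;
}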

View file

@@ -31,6 +31,8 @@
  * SUCH DAMAGE.
  */
+#if __SIZEOF_POINTER__ == 4
 /*
  * The code has been taken from FreeBSD (sys/libkern/lshrdi3.c) and is therefore
  * BSD-licensed. Unnecessary functions have been removed and all typedefs required
@@ -61,3 +63,5 @@ __lshrdi3(a, shift)
 	}
 	return (aa.q);
 }
+#endif

View file

@@ -31,6 +31,8 @@
  * SUCH DAMAGE.
  */
+#if __SIZEOF_POINTER__ == 4
 /*
  * The code has been taken from FreeBSD (sys/libkern/moddi3.c) and is therefore
  * BSD-licensed. Unnecessary functions have been removed and all typedefs required
@@ -63,3 +65,5 @@ __moddi3(a, b)
 	(void)__qdivrem(ua, ub, &ur);
 	return (neg ? -ur : ur);
 }
+#endif

View file

@@ -37,6 +37,8 @@
  * have been added in quad.h.
  */
+#if __SIZEOF_POINTER__ == 4
 /*
  * Multiprecision divide. This algorithm is from Knuth vol. 2 (2nd ed),
  * section 4.3.1, pp. 257--259.
@@ -289,3 +291,5 @@ u_quad_t uq, vq, *arq;
 	tmp.ul[L] = COMBINE(qspace[3], qspace[4]);
 	return (tmp.q);
 }
+#endif

View file

@@ -31,6 +31,8 @@
  * SUCH DAMAGE.
  */
+#if __SIZEOF_POINTER__ == 4
 /*
  * The code has been taken from FreeBSD (sys/libkern/ucmpdi2.c) and is therefore
  * BSD-licensed. Unnecessary functions have been removed and all typedefs required
@@ -54,3 +56,5 @@ __ucmpdi2(a, b)
 	return (aa.ul[H] < bb.ul[H] ? 0 : aa.ul[H] > bb.ul[H] ? 2 :
 	    aa.ul[L] < bb.ul[L] ? 0 : aa.ul[L] > bb.ul[L] ? 2 : 1);
 }
+#endif

View file

@@ -31,6 +31,8 @@
  * SUCH DAMAGE.
  */
+#if __SIZEOF_POINTER__ == 4
 /*
  * The code has been taken from FreeBSD (sys/libkern/udivdi3.c) and is therefore
  * BSD-licensed. Unnecessary functions have been removed and all typedefs required
@@ -48,3 +50,5 @@ u_quad_t a, b;
 	return (__qdivrem(a, b, (u_quad_t *) 0));
 }
+#endif

View file

@@ -31,6 +31,8 @@
  * SUCH DAMAGE.
  */
+#if __SIZEOF_POINTER__ == 4
 /*
  * The code has been taken from FreeBSD (sys/libkern/umoddi3.c) and is therefore
  * BSD-licensed. Unnecessary functions have been removed and all typedefs required
@@ -50,3 +52,5 @@ u_quad_t a, b;
 	(void)__qdivrem(a, b, &r);
 	return (r);
 }
+#endif

View file

@@ -104,7 +104,7 @@ int mmu_init(void)
 #ifdef CONFIG_MULTIBOOT
 	if (mb_info && (mb_info->flags & MULTIBOOT_INFO_MEM_MAP)) {
 		size_t end_addr;
-		multiboot_memory_map_t* mmap = (multiboot_memory_map_t*) mb_info->mmap_addr;
+		multiboot_memory_map_t* mmap = (multiboot_memory_map_t*) ((size_t) mb_info->mmap_addr);
 		multiboot_memory_map_t* mmap_end = (void*) ((size_t) mb_info->mmap_addr + mb_info->mmap_length);
 		while (mmap < mmap_end) {
@@ -134,7 +134,7 @@ int mmu_init(void)
 	 * Therefore, we set these pages as used.
 	 */
 	if (mb_info && (mb_info->flags & MULTIBOOT_INFO_MODS)) {
-		multiboot_module_t* mmodule = (multiboot_module_t*) mb_info->mods_addr;
+		multiboot_module_t* mmodule = (multiboot_module_t*) ((size_t) mb_info->mods_addr);
 		/*
 		 * Mark the mb_info as used.