more cleanup of old 32bit code relics, typos and indentation

This commit is contained in:
Steffen Vogel 2013-11-14 12:22:52 +01:00
parent 90d884ec8d
commit 9441d21d89
5 changed files with 15 additions and 65 deletions

View file

@ -29,7 +29,6 @@
SECTION .mboot
global start
start:
mov byte [msg], 'H'
jmp stublet
; This part MUST be 4byte aligned, so we solve that issue using 'ALIGN 4'
@ -38,10 +37,10 @@ mboot:
; Multiboot macros to make a few lines more readable later
MULTIBOOT_PAGE_ALIGN equ 1<<0
MULTIBOOT_MEMORY_INFO equ 1<<1
; MULTIBOOT_AOUT_KLUDGE equ 1<<16
; MULTIBOOT_AOUT_KLUDGE equ 1<<16
MULTIBOOT_HEADER_MAGIC equ 0x1BADB002
MULTIBOOT_HEADER_FLAGS equ MULTIBOOT_PAGE_ALIGN | MULTIBOOT_MEMORY_INFO ; | MULTIBOOT_AOUT_KLUDGE
MULTIBOOT_CHECKSUM equ -(MULTIBOOT_HEADER_MAGIC + MULTIBOOT_HEADER_FLAGS)
MULTIBOOT_CHECKSUM equ -(MULTIBOOT_HEADER_MAGIC + MULTIBOOT_HEADER_FLAGS)
EXTERN code, bss, end
; This is the GRUB Multiboot header. A boot signature
@ -49,8 +48,6 @@ mboot:
dd MULTIBOOT_HEADER_FLAGS
dd MULTIBOOT_CHECKSUM
msg db "?ello from MetalSVM kernel!!", 0
SECTION .text
ALIGN 4
stublet:

View file

@ -317,11 +317,11 @@ init_paging:
mov cr3, edi
mov DWORD [edi], boot_pdpt
or DWORD [edi], 0x3
or DWORD [edi], 0x07 ; set present, user and writable flags
add edi, (PAGE_MAP_ENTRIES-1)*8 ; setup recursive paging
mov DWORD [edi], boot_pml4 ; boot_pml4[511] -> boot_pml4
or DWORD [edi], 0x3 ; set present and writable flags
or DWORD [edi], 0x03 ; set present and writable flags
mov edi, boot_pdpt
mov DWORD [edi], boot_pgd
@ -332,7 +332,7 @@ init_paging:
mov ecx, PAGE_MAP_ENTRIES ; map all boot_pgt to the kernel space
.l1:
mov DWORD [edi], ebx
or DWORD [edi], 0x3 ; set present and writable flags
or DWORD [edi], 0x03 ; set present and writable flags
add edi, 8
add ebx, 0x1000
loop .l1

View file

@ -42,38 +42,4 @@ L3:
pop rax
ret
%if 0
; The following function is derived from JamesM's kernel development tutorials
; (http://www.jamesmolloy.co.uk/tutorial_html/)
global copy_page_physical
copy_page_physical:
push esi ; According to __cdecl, we must preserve the contents of ESI
push edi ; and EDI.
pushf ; push EFLAGS, so we can pop it and reenable interrupts
; later, if they were enabled anyway.
cli ; Disable interrupts, so we aren't interrupted.
; Load these in BEFORE we disable paging!
mov edi, [esp+12+4] ; Destination address
mov esi, [esp+12+8] ; Source address
mov edx, cr0 ; Get the control register...
and edx, 0x7fffffff ; and...
mov cr0, edx ; Disable paging.
cld
mov ecx, 0x400 ; 1024*4bytes = 4096 bytes = page size
rep movsd ; copy page
mov edx, cr0 ; Get the control register again
or edx, 0x80000000 ; and...
mov cr0, edx ; Enable paging.
popf ; Pop EFLAGS back.
pop edi ; Get the original value of EDI
pop esi ; and ESI back.
ret
%endif
SECTION .note.GNU-stack noalloc noexec nowrite progbits

View file

@ -58,7 +58,7 @@ extern const void kernel_end;
// boot task's page directory and page directory lock
static page_map_t boot_pgd = {{[0 ... MAP_ENTRIES-1] = 0}};
static page_map_t boot_pgt[KERNEL_SPACE/(1024*PAGE_SIZE)];
static page_map_t boot_pgt[KERNEL_SPACE/(MAP_ENTRIES*PAGE_SIZE)];
static page_map_t pgt_container = {{[0 ... MAP_ENTRIES-1] = 0}};
static spinlock_t kslock = SPINLOCK_INIT;
@ -72,7 +72,7 @@ page_map_t* get_boot_page_map(void)
/*
* TODO: We create a full copy of the current task. Copy-On-Access will be the better solution.
*
* No PGD locking is needed because only create_pgd use this function and holds already the
* No PGD locking is needed because only create_page_map use this function and holds already the
* PGD lock.
*/
inline static size_t copy_page_table(task_t* task, uint32_t pgd_index, page_map_t* pgt, int* counter)

View file

@ -39,7 +39,7 @@
* 0x000000000000 - 0x0000000FFFFF: reserved for IO devices (16MB)
* 0x000000100000 - 0x00000DEADFFF: Kernel (size depends on the configuration) (221MB)
* 0x00000DEAE000 - 0x00003FFFFFFF: Kernel heap
* 0xFF8000000000 - 0xFFFFFFFFFFFF: Paging structures are mapped in this region (1GB)
* 0xFF8000000000 - 0xFFFFFFFFFFFF: Paging structures are mapped in this region (max 512GB)
*/
/*
@ -49,15 +49,9 @@
extern const void kernel_start;
extern const void kernel_end;
// boot task's page directory and page directory lock
// boot task's page map and page map lock
extern page_map_t boot_pml4;
static spinlock_t kslock = SPINLOCK_INIT;
static int paging_enabled = 0;
/*static page_map_t boot_pml4 = {{[0 ... MAP_ENTRIES-1] = 0}};
static page_map_t boot_pdpt = {{[0 ... MAP_ENTRIES-1] = 0}};
static page_map_t boot_pgd = {{[0 ... MAP_ENTRIES-1] = 0}};
static page_map_t boot_pgt = {{[0 ... MAP_ENTRIES-1] = 0}};*/
page_map_t* get_boot_page_map(void)
{
@ -164,9 +158,6 @@ size_t map_region(size_t viraddr, size_t phyaddr, uint32_t npages, uint32_t flag
if (BUILTIN_EXPECT(!task || !task->page_map, 0))
return 0;
if (BUILTIN_EXPECT(!paging_enabled && (viraddr != phyaddr), 0))
return 0;
if (flags & MAP_KERNEL_SPACE)
spinlock_lock(&kslock);
else
@ -260,9 +251,6 @@ int change_page_permissions(size_t start, size_t end, uint32_t flags)
page_map_t* pgd;
task_t* task = per_core(current_task);
if (BUILTIN_EXPECT(!paging_enabled, 0))
return -EINVAL;
pgd = per_core(current_task)->page_map;
if (BUILTIN_EXPECT(!pgd, 0))
return -EINVAL;
@ -404,7 +392,7 @@ int unmap_region(size_t viraddr, uint32_t npages)
uint16_t index_pml4, index_pdpt;
uint16_t index_pgd, index_pgt;
if (BUILTIN_EXPECT(!task || !task->page_map || !paging_enabled, 0))
if (BUILTIN_EXPECT(!task || !task->page_map, 0))
return -EINVAL;
if (viraddr <= KERNEL_SPACE)
@ -571,8 +559,10 @@ int arch_paging_init(void)
irq_uninstall_handler(14);
irq_install_handler(14, pagefault_handler);
// kernel is already maped into the kernel space (see entry64.asm)
// this includes .data, .bss, .text, video memory and the multiboot structure
/*
* In longmode the kernel is already maped into the kernel space (see entry64.asm)
* this includes .data, .bss, .text, VGA, the multiboot & multiprocessing (APIC) structures
*/
#if MAX_CORES > 1
// Reserve page for smp boot code
@ -605,7 +595,7 @@ int arch_paging_init(void)
/*
* Modules like the init ram disk are already loaded.
* Therefore, we map these moduels into the kernel space.
* Therefore, we map these modules into the kernel space.
*/
if (mb_info && (mb_info->flags & MULTIBOOT_INFO_MODS)) {
multiboot_module_t* mmodule = (multiboot_module_t*) ((size_t) mb_info->mods_addr);
@ -626,9 +616,6 @@ int arch_paging_init(void)
}
#endif
/* signalize that we are able to use paging */
paging_enabled = 1;
/*
* we turned on paging
* => now, we are able to register our task