some smaller codestyle improvements

Steffen Vogel 2014-02-18 12:54:52 +01:00
parent 7a08120fd2
commit 61ec065da1
5 changed files with 61 additions and 52 deletions

@@ -27,8 +27,11 @@
#include <metalsvm/malloc.h>
#include <asm/page.h>
#include <asm/irqflags.h>
#include <asm/processor.h>
#include <asm/pmc.h>
#define ITERATIONS 1000
#define PAGE_COUNT 10
#define SIZE (PAGE_COUNT*PAGE_SIZE)
#define VIRT_FROM_ADDR 0x100000000000
@@ -83,25 +86,7 @@ uint16_t checksum(size_t start, size_t end)
return sum;
}
static int paging_stage2(void *arg)
{
size_t old, new;
kprintf("PAGING: entering stage 2...\n");
size_t cr3 = read_cr3();
kprintf("cr3 new = %#lx\n", cr3);
old = *((size_t *) arg);
kprintf("old sum: %lu\n", old);
new = checksum(VIRT_FROM_ADDR, VIRT_FROM_ADDR + SIZE);
test(old == new, "checksum(%p, %p) = %lu", VIRT_FROM_ADDR, VIRT_FROM_ADDR + SIZE, new);
page_dump(0, -1L);
return 0;
}
static int paging_stage2(void *arg);
/** @brief Test of the paging subsystem
*
@@ -134,7 +119,7 @@ static void paging(void)
// allocate physical page frames
phys = get_pages(PAGE_COUNT);
test(phys, "get_pages(%lu) = 0x%lx", PAGE_COUNT, phys);
test(phys, "get_pages(%lu) = %#lx", PAGE_COUNT, phys);
// create first mapping
virt_from = map_region(VIRT_FROM_ADDR, phys, PAGE_COUNT, 0);
@@ -142,7 +127,7 @@ static void paging(void)
// check address translation
phys = virt_to_phys(virt_from);
test(phys, "virt_to_phys(0x%lx) = 0x%lx", virt_from, phys);
test(phys, "virt_to_phys(%#lx) = %#lx", virt_from, phys);
// write test data
p1 = (size_t *) virt_from;
@@ -152,15 +137,15 @@ static void paging(void)
// create second mapping pointing to the same page frames
virt_to = map_region(VIRT_TO_ADDR, phys, PAGE_COUNT, MAP_USER_SPACE);
test(virt_to, "map_region(0x%lx, 0x%lx, %lu, 0x%x) = 0x%lx", VIRT_TO_ADDR, phys, PAGE_COUNT, 0, virt_to);
// show paging info again
page_dump(0, -1L);
page_stats(0, -1L, 0);
test(virt_to, "map_region(%#lx, %#lx, %lu, %#x) = %#lx", VIRT_TO_ADDR, phys, PAGE_COUNT, 0, virt_to);
// check address translation
phys = virt_to_phys(virt_to);
test(phys, "virt_to_phys(0x%lx) = 0x%lx", virt_to, phys);
test(phys, "virt_to_phys(%#lx) = %#lx", virt_to, phys);
// check if both mapped areas are equal
p2 = (size_t *) virt_to;
@@ -172,11 +157,11 @@ static void paging(void)
// try to remap without MAP_REMAP
virt_to = map_region(VIRT_TO_ADDR, phys+PAGE_SIZE, PAGE_COUNT, MAP_USER_SPACE);
test(!virt_to, "map_region(0x%lx, 0x%lx, %lu, 0x%x) = 0x%lx (without MAP_REMAP flag)", VIRT_TO_ADDR, phys+PAGE_SIZE, PAGE_COUNT, 0, virt_to);
test(!virt_to, "map_region(%#lx, %#lx, %lu, %#x) = %#lx (without MAP_REMAP flag)", VIRT_TO_ADDR, phys+PAGE_SIZE, PAGE_COUNT, 0, virt_to);
// try to remap with MAP_REMAP
virt_to = map_region(VIRT_TO_ADDR, phys+PAGE_SIZE, PAGE_COUNT, MAP_REMAP|MAP_USER_SPACE);
test(virt_to, "map_region(0x%lx, 0x%lx, %lu, 0x%x) = 0x%lx (with MAP_REMAP flag)", VIRT_TO_ADDR, phys+PAGE_SIZE, PAGE_COUNT, MAP_REMAP, virt_to);
test(virt_to, "map_region(%#lx, %#lx, %lu, %#x) = %#lx (with MAP_REMAP flag)", VIRT_TO_ADDR, phys+PAGE_SIZE, PAGE_COUNT, MAP_REMAP, virt_to);
// check if data is not equal anymore (we remapped with +PAGE_SIZE offset)
p2 = (size_t *) virt_to;
@@ -203,6 +188,26 @@ static void paging(void)
sleep(5);
}
static int paging_stage2(void *arg)
{
size_t old, new;
kprintf("PAGING: entering stage 2...\n");
size_t cr3 = read_cr3();
kprintf("cr3 new = %#lx\n", cr3);
old = *((size_t *) arg);
kprintf("old sum: %lu\n", old);
page_dump(PG_XD | PG_GLOBAL | PG_USER | PG_RW);
new = checksum(VIRT_TO_ADDR, VIRT_TO_ADDR + SIZE);
test(old == new, "checksum(%p, %p) = %lu", VIRT_TO_ADDR, VIRT_TO_ADDR + SIZE, new);
return 0;
}
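
The stage-2 check above compares a previously computed checksum (passed in via arg) with one computed over the second mapping. The body of checksum() is not part of this diff; a minimal sketch of the assumed semantics (a plain word-wise sum over [start, end), folded to 16 bits) would be:

#include <stdint.h>
#include <stddef.h>

/* Sketch only: the real checksum() in this test file is not shown in the
 * diff and may differ. Assumed behaviour: sum every size_t word in
 * [start, end) and truncate the result to 16 bits. */
static uint16_t checksum(size_t start, size_t end)
{
	size_t sum = 0;
	size_t *p;

	for (p = (size_t *) start; p < (size_t *) end; p++)
		sum += *p;

	return (uint16_t) sum;
}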
/** @brief Test of the VMA allocator */
static void vma(void)
{
@@ -211,40 +216,40 @@ static void vma(void)
// vma_alloc
size_t a1 = vma_alloc(SIZE, VMA_HEAP);
test(a1, "vma_alloc(0x%x, 0x%x) = 0x%lx", SIZE, VMA_HEAP, a1);
test(a1, "vma_alloc(%#x, %#x) = %#lx", SIZE, VMA_HEAP, a1);
size_t a2 = vma_alloc(SIZE, VMA_HEAP|VMA_USER);
test(a2 != 0, "vma_alloc(0x%x, 0x%x) = 0x%lx", SIZE, VMA_HEAP|VMA_USER, a2);
test(a2 != 0, "vma_alloc(%#x, %#x) = %#lx", SIZE, VMA_HEAP|VMA_USER, a2);
vma_dump();
// vma_free
ret = vma_free(a1, a1+SIZE);
test(ret >= 0, "vma_free(0x%lx, 0x%lx) = %i", a1, a1+SIZE, ret);
test(ret >= 0, "vma_free(%#lx, %#lx) = %i", a1, a1+SIZE, ret);
ret = vma_free(a2, a2+SIZE);
test(ret >= 0, "vma_free(0x%lx, 0x%lx) = %i", a2, a2+SIZE, ret);
test(ret >= 0, "vma_free(%#lx, %#lx) = %i", a2, a2+SIZE, ret);
vma_dump();
// vma_add
ret = vma_add(VIRT_FROM_ADDR, VIRT_FROM_ADDR+SIZE, VMA_HEAP|VMA_USER);
test(ret >= 0, "vma_add(0x%lx, 0x%lx, 0x%x) = %u", VIRT_FROM_ADDR, VIRT_FROM_ADDR+SIZE, VMA_HEAP|VMA_USER, ret);
test(ret >= 0, "vma_add(%#lx, %#lx, %#x) = %u", VIRT_FROM_ADDR, VIRT_FROM_ADDR+SIZE, VMA_HEAP|VMA_USER, ret);
ret = vma_add(VIRT_FROM_ADDR+SIZE, VIRT_FROM_ADDR+2*SIZE, VMA_HEAP|VMA_USER);
test(ret >= 0, "vma_add(0x%lx, 0x%lx, 0x%x) = %u", VIRT_FROM_ADDR+SIZE, VIRT_FROM_ADDR+2*SIZE, VMA_HEAP|VMA_USER, ret);
test(ret >= 0, "vma_add(%#lx, %#lx, %#x) = %u", VIRT_FROM_ADDR+SIZE, VIRT_FROM_ADDR+2*SIZE, VMA_HEAP|VMA_USER, ret);
ret = vma_add(VIRT_FROM_ADDR-SIZE, VIRT_FROM_ADDR, VMA_HEAP|VMA_USER);
test(ret >= 0, "vma_add(0x%lx, 0x%lx, 0x%x) = %u", VIRT_FROM_ADDR-SIZE, VIRT_FROM_ADDR, VMA_HEAP|VMA_USER, ret);
test(ret >= 0, "vma_add(%#lx, %#lx, %#x) = %u", VIRT_FROM_ADDR-SIZE, VIRT_FROM_ADDR, VMA_HEAP|VMA_USER, ret);
vma_dump();
// vma_free
ret = vma_free(VIRT_FROM_ADDR-SIZE, VIRT_FROM_ADDR);
test(ret >= 0, "vma_free(0x%lx, 0x%lx) = %u", VIRT_FROM_ADDR-SIZE, VIRT_FROM_ADDR, ret);
test(ret >= 0, "vma_free(%#lx, %#lx) = %u", VIRT_FROM_ADDR-SIZE, VIRT_FROM_ADDR, ret);
ret = vma_free(VIRT_FROM_ADDR+SIZE, VIRT_FROM_ADDR+2*SIZE);
test(ret >= 0, "vma_free(0x%lx, 0x%lx) = %u", VIRT_FROM_ADDR+SIZE, VIRT_FROM_ADDR+2*SIZE, ret);
test(ret >= 0, "vma_free(%#lx, %#lx) = %u", VIRT_FROM_ADDR+SIZE, VIRT_FROM_ADDR+2*SIZE, ret);
ret = vma_free(VIRT_FROM_ADDR, VIRT_FROM_ADDR+SIZE);
test(ret >= 0, "vma_free(0x%lx, 0x%lx) = %u", VIRT_FROM_ADDR, VIRT_FROM_ADDR+SIZE, ret);
test(ret >= 0, "vma_free(%#lx, %#lx) = %u", VIRT_FROM_ADDR, VIRT_FROM_ADDR+SIZE, ret);
vma_dump();
}
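
Most of the changes in this test file replace hand-written "0x%lx" prefixes with the "%#lx" alternate form, which makes the formatting routine emit the "0x" prefix itself. One caveat of the standard behaviour: for a zero value, %#x and %#lx print just "0" without a prefix. A small stand-alone illustration using standard printf() (kprintf() is assumed to follow the same convention):

#include <stdio.h>

int main(void)
{
	unsigned long addr = 0x100000000000UL;

	printf("0x%lx\n", addr); /* manual prefix  -> 0x100000000000 */
	printf("%#lx\n", addr);  /* alternate form -> 0x100000000000 */
	printf("%#lx\n", 0UL);   /* corner case    -> 0 (no prefix)  */

	return 0;
}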

@@ -84,13 +84,13 @@ GDT64: ; Global Descriptor Table (64-bit).
times 256 DD 0 ; Stack for booting
startup_stack:
PAGE_MAP_ENTRIES equ (1<<9)
PAGE_SIZE equ (1<<12)
SECTION .data
; Create default page tables for the 64bit kernel
global boot_pml4
ALIGN 4096 ; of course, the page tables have to be page aligned
PAGE_MAP_ENTRIES equ (1<<9)
PAGE_SIZE equ (1<<12)
ALIGN PAGE_SIZE ; of course, the page tables have to be page aligned
boot_pml4 times PAGE_MAP_ENTRIES DQ 0
boot_pdpt times PAGE_MAP_ENTRIES DQ 0
@@ -751,7 +751,7 @@ isrsyscall:
mov ax, 0x10
mov ds, ax
; x86-64 ABI calling convention
; x86-64 ABI calling convention (see newlib/src/libgloss/syscall.h)
mov r8, rbx
mov r9, rax
mov rax, 0 ; we've not used vector registers for this va_arg call
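
The cleared rax in the snippet above is tied to the System V x86-64 calling convention: before calling a variadic function, the caller passes an upper bound on the number of vector registers used for the call in %al, and the syscall path here passes integer registers only. A generic user-space illustration of such a variadic callee (not code from this repository):

#include <stdarg.h>
#include <stdio.h>

/* Variadic callee: the System V x86-64 ABI requires the caller to put an
 * upper bound on the number of vector registers used into %al, which is
 * what the "mov rax, 0" before the va_arg call in isrsyscall provides. */
static long dispatch(long nr, ...)
{
	va_list ap;
	long a, b;

	va_start(ap, nr);
	a = va_arg(ap, long);
	b = va_arg(ap, long);
	va_end(ap);

	return nr + a + b;
}

int main(void)
{
	/* Only integer arguments are passed, so a compiler typically emits
	 * "xor eax, eax" (i.e. %al = 0) right before this call. */
	printf("%ld\n", dispatch(1, 2, 3)); /* prints 6 */
	return 0;
}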

@@ -102,7 +102,7 @@ int cpu_detection(void)
}
if (first_time) {
kprintf("Paging features: %s%s%s%s%s%s%s%s",
kprintf("Paging features: %s%s%s%s%s%s%s%s\n",
(cpu_info.feature1 & CPU_FEATUE_PSE) ? "PSE (2/4Mb) " : "",
(cpu_info.feature1 & CPU_FEATURE_PAE) ? "PAE " : "",
(cpu_info.feature1 & CPU_FEATURE_PGE) ? "PGE " : "",

@@ -296,13 +296,13 @@ int mmu_init(void)
// enable paging and map SMP, VGA, Multiboot modules etc.
ret = paging_init();
if (ret) {
if (BUILTIN_EXPECT(ret, 0)) {
kprintf("Failed to initialize paging: %d\n", ret);
return ret;
}
ret = vma_init();
if (ret) {
if (BUILTIN_EXPECT(ret, 0)) {
kprintf("Failed to initialize VMA regions: %d\n", ret);
return ret;
}
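
The error paths in mmu_init() (and throughout vma_init() below) are now wrapped in BUILTIN_EXPECT(ret, 0), marking a non-zero return value as the unlikely case. The macro is presumably a thin wrapper around GCC's __builtin_expect; a minimal sketch under that assumption:

/* Assumed definition; the actual macro in this code base may differ. */
#define BUILTIN_EXPECT(exp, b)	__builtin_expect((exp), (b))

/* Hypothetical stand-in for paging_init()/vma_init(). */
static int do_setup(void)
{
	return 0;
}

int init_example(void)
{
	int ret = do_setup();

	/* Hint the compiler that ret != 0 (the error case) is unlikely,
	 * so the fall-through success path stays on the hot path. */
	if (BUILTIN_EXPECT(ret, 0))
		return ret;

	return 0;
}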

@@ -58,29 +58,29 @@ int vma_init()
ret = vma_add(PAGE_CEIL((size_t) &kernel_start),
PAGE_FLOOR((size_t) &kernel_end),
VMA_READ|VMA_WRITE|VMA_EXECUTE|VMA_CACHEABLE);
if (ret)
if (BUILTIN_EXPECT(ret, 0))
goto out;
// add MP Table
ret = vma_add(PAGE_CEIL((size_t) apic_mp),
PAGE_FLOOR((size_t) apic_mp + sizeof(apic_mp_t)),
VMA_READ|VMA_WRITE|VMA_EXECUTE|VMA_CACHEABLE);
if (ret)
if (BUILTIN_EXPECT(ret, 0))
goto out;
// add IOAPIC and LAPIC memory mapped registers
ret = vma_add(LAPIC_ADDR, LAPIC_ADDR + PAGE_SIZE, VMA_READ|VMA_WRITE);
if (ret)
if (BUILTIN_EXPECT(ret, 0))
goto out;
ret = vma_add(IOAPIC_ADDR, IOAPIC_ADDR + PAGE_SIZE, VMA_READ|VMA_WRITE);
if (ret)
if (BUILTIN_EXPECT(ret, 0))
goto out;
#ifdef CONFIG_VGA
// add VGA video memory
ret = vma_add(VIDEO_MEM_ADDR, VIDEO_MEM_ADDR + PAGE_SIZE, VMA_READ|VMA_WRITE);
if (ret)
if (BUILTIN_EXPECT(ret, 0))
goto out;
#endif
@@ -88,7 +88,7 @@ int vma_init()
// add SMP boot page
ret = vma_add(SMP_SETUP_ADDR, SMP_SETUP_ADDR + PAGE_SIZE,
VMA_READ|VMA_WRITE|VMA_EXECUTE|VMA_CACHEABLE);
if (ret)
if (BUILTIN_EXPECT(ret, 0))
goto out;
#endif
@@ -98,7 +98,7 @@ int vma_init()
ret = vma_add(PAGE_CEIL((size_t) mb_info),
PAGE_FLOOR((size_t) mb_info + sizeof(multiboot_info_t)),
VMA_READ|VMA_CACHEABLE);
if (ret)
if (BUILTIN_EXPECT(ret, 0))
goto out;
if (mb_info->flags & MULTIBOOT_INFO_MEM_MAP) {
@@ -119,7 +119,7 @@ int vma_init()
ret = vma_add(PAGE_CEIL(mmodule[i].mod_start),
PAGE_FLOOR(mmodule[i].mod_end),
VMA_READ|VMA_WRITE|VMA_CACHEABLE);
if (ret)
if (BUILTIN_EXPECT(ret, 0))
goto out;
}
}
@@ -136,7 +136,7 @@ size_t vma_alloc(size_t size, uint32_t flags)
spinlock_t* lock;
vma_t** list;
kprintf("vma_alloc(0x%lx, 0x%x)\n", size, flags);
kprintf("vma_alloc: size = %#lx, flags = %#x\n", size, flags); // TODO: remove
size_t base, limit; // boundaries for search
size_t start, end;
@@ -216,6 +216,8 @@ int vma_free(size_t start, size_t end)
vma_t* vma;
vma_t** list;
kprintf("vma_free: start = %#lx, end = %#lx\n", start, end); // TODO: remove
if (BUILTIN_EXPECT(start >= end, 0))
return -EINVAL;
@@ -306,6 +308,8 @@ int vma_add(size_t start, size_t end, uint32_t flags)
return -EINVAL;
}
kprintf("vma_add: start = %#lx, end = %#lx, flags = %#x\n", start, end, flags); // TODO: remove
spinlock_lock(lock);
// search gap