mirror of https://github.com/hermitcore/libhermit.git

fix typos, fix meaning of PAGE_CEIL and PAGE_FLOOR

Stefan Lankes 2017-07-15 16:24:08 +02:00
parent 6f34ac09a8
commit 437f445ed8
13 changed files with 50 additions and 50 deletions

View file

@@ -102,14 +102,14 @@ static inline size_t sign_extend(ssize_t addr, int bits)
 #define PAGE_MAP_ENTRIES	(1L << PAGE_MAP_BITS)
 /// Align to next page
-#define PAGE_FLOOR(addr)	(((addr) + PAGE_SIZE - 1) & PAGE_MASK)
+#define PAGE_CEIL(addr)		(((addr) + PAGE_SIZE - 1) & PAGE_MASK)
 /// Align to page
-#define PAGE_CEIL(addr)		( (addr) & PAGE_MASK)
+#define PAGE_FLOOR(addr)	( (addr) & PAGE_MASK)
 /// Align to next 2M boundary
-#define PAGE_2M_FLOOR(addr)	(((addr) + (1L << 21) - 1) & ((~0L) << 21))
+#define PAGE_2M_CEIL(addr)	(((addr) + (1L << 21) - 1) & ((~0L) << 21))
 /// Align to nex 2M boundary
-#define PAGE_2M_CEIL(addr)	( (addr) & ((~0L) << 21))
+#define PAGE_2M_FLOOR(addr)	( (addr) & ((~0L) << 21))
 /// Page is present
 #define PG_PRESENT		(1 << 0)
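
After the rename the macro names match their behavior: PAGE_CEIL rounds an address up to the next page boundary, PAGE_FLOOR rounds it down, and the PAGE_2M variants do the same for 2 MiB boundaries. A minimal standalone sketch of the corrected semantics, assuming 4 KiB pages and the usual PAGE_MASK = ~(PAGE_SIZE - 1) (neither value is shown in this hunk):

/* Sketch only: PAGE_SIZE, PAGE_MASK and the test values are assumptions for
 * illustration; the macro bodies are copied from the diff above. */
#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE		4096L
#define PAGE_MASK		(~(PAGE_SIZE - 1))

#define PAGE_CEIL(addr)		(((addr) + PAGE_SIZE - 1) & PAGE_MASK)	/* round up */
#define PAGE_FLOOR(addr)	( (addr) & PAGE_MASK)			/* round down */
#define PAGE_2M_CEIL(addr)	(((addr) + (1L << 21) - 1) & ((~0L) << 21))
#define PAGE_2M_FLOOR(addr)	( (addr) & ((~0L) << 21))

int main(void)
{
	assert(PAGE_CEIL(0x1001L)  == 0x2000L);		/* rounds up to the next page   */
	assert(PAGE_FLOOR(0x1fffL) == 0x1000L);		/* rounds down to the page base */
	assert(PAGE_CEIL(0x1000L)  == 0x1000L);		/* aligned addresses stay put   */
	assert(PAGE_2M_CEIL(0x200001L)  == 0x400000L);
	assert(PAGE_2M_FLOOR(0x3fffffL) == 0x200000L);
	puts("CEIL rounds up, FLOOR rounds down");
	return 0;
}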

View file

@@ -364,7 +364,7 @@ int apic_enable_timer(void)
 }
 static apic_mp_t* search_mptable(size_t base, size_t limit) {
-	size_t ptr=PAGE_CEIL(base), vptr=0;
+	size_t ptr=PAGE_FLOOR(base), vptr=0;
 	size_t flags = PG_GLOBAL | PG_RW | PG_PCD;
 	apic_mp_t* tmp;
 	uint32_t i;
@@ -410,7 +410,7 @@ static apic_mp_t* search_mptable(size_t base, size_t limit) {
 #if 0
 static size_t search_ebda(void) {
-	size_t ptr=PAGE_CEIL(0x400), vptr=0xF0000;
+	size_t ptr=PAGE_FLOOR(0x400), vptr=0xF0000;
 	size_t flags = PG_GLOBAL | PG_RW | PG_PCD;
 	// protec apic by the NX flags
@@ -580,8 +580,8 @@ int smp_init(void)
 	 * Wakeup the other cores via IPI. They start at this address
	 * in real mode, switch to protected and finally they jump to smp_main.
	 */
-	page_map(SMP_SETUP_ADDR, SMP_SETUP_ADDR, PAGE_FLOOR(sizeof(boot_code)) >> PAGE_BITS, PG_RW|PG_GLOBAL);
-	vma_add(SMP_SETUP_ADDR, SMP_SETUP_ADDR + PAGE_FLOOR(sizeof(boot_code)), VMA_READ|VMA_WRITE|VMA_CACHEABLE);
+	page_map(SMP_SETUP_ADDR, SMP_SETUP_ADDR, PAGE_CEIL(sizeof(boot_code)) >> PAGE_BITS, PG_RW|PG_GLOBAL);
+	vma_add(SMP_SETUP_ADDR, SMP_SETUP_ADDR + PAGE_CEIL(sizeof(boot_code)), VMA_READ|VMA_WRITE|VMA_CACHEABLE);
 	memcpy((void*)SMP_SETUP_ADDR, boot_code, sizeof(boot_code));
 	for(i=0; i<sizeof(boot_code); i++)

View file

@@ -86,12 +86,12 @@ static inline size_t sign_extend(ssize_t addr, int bits)
 #endif
 /// The number of entries in a page map table
 #define PAGE_MAP_ENTRIES	(1L << PAGE_MAP_BITS)
 /// Align to next page
-#define PAGE_FLOOR(addr)	(((addr) + PAGE_SIZE - 1) & PAGE_MASK)
+#define PAGE_CEIL(addr)		(((addr) + PAGE_SIZE - 1) & PAGE_MASK)
 /// Align to page
-#define PAGE_CEIL(addr)		( (addr) & PAGE_MASK)
+#define PAGE_FLOOR(addr)	( (addr) & PAGE_MASK)
 /// Page is present
 #define PG_PRESENT		(1 << 0)

View file

@@ -183,7 +183,7 @@ void main(void)
 			viraddr = prog_header->virt_addr;
 			if (!phyaddr)
 				phyaddr = prog_header->offset + (size_t)header;
-			file_size = prog_header->virt_addr + PAGE_FLOOR(prog_header->file_size) - viraddr;
+			file_size = prog_header->virt_addr + PAGE_CEIL(prog_header->file_size) - viraddr;
 			mem_size += prog_header->mem_size;
 			}
 			break;

View file

@@ -45,7 +45,7 @@ extern const void kernel_start;
 extern const void kernel_end;
 /// This page is reserved for copying
-#define PAGE_TMP		(PAGE_FLOOR((size_t) &kernel_start) - PAGE_SIZE)
+#define PAGE_TMP		(PAGE_CEIL((size_t) &kernel_start) - PAGE_SIZE)
 /** This PGD table is initialized in entry.asm */
 extern size_t* boot_map;
@@ -188,12 +188,12 @@ int page_init(void)
 	// already mapped => entry.asm
 	//addr = (size_t) mb_info & PAGE_MASK;
-	//npages = PAGE_FLOOR(sizeof(*mb_info)) >> PAGE_BITS;
+	//npages = PAGE_CEIL(sizeof(*mb_info)) >> PAGE_BITS;
 	//page_map(addr, addr, npages, PG_GLOBAL);
 	if (mb_info->flags & MULTIBOOT_INFO_MODS) {
 		addr = mb_info->mods_addr;
-		npages = PAGE_FLOOR(mb_info->mods_count*sizeof(multiboot_module_t)) >> PAGE_BITS;
+		npages = PAGE_CEIL(mb_info->mods_count*sizeof(multiboot_module_t)) >> PAGE_BITS;
 		ret = page_map(addr, addr, npages, PG_GLOBAL);
 		kprintf("Map module info at 0x%lx (ret %d)\n", addr, ret);
@@ -202,14 +202,14 @@ int page_init(void)
 		// at first we determine the first free page
 		for(int i=0; i<mb_info->mods_count; i++) {
 			if (first_page < mmodule[i].mod_end)
-				first_page = PAGE_FLOOR(mmodule[i].mod_end);
+				first_page = PAGE_CEIL(mmodule[i].mod_end);
 		}
 		// we map only the first page of each module (= ELF file) because
 		// we need only the program header of the ELF file
 		for(int i=0; i<mb_info->mods_count; i++) {
 			addr = mmodule[i].mod_start;
-			npages = PAGE_FLOOR(mmodule[i].mod_end - mmodule[i].mod_start) >> PAGE_BITS;
+			npages = PAGE_CEIL(mmodule[i].mod_end - mmodule[i].mod_start) >> PAGE_BITS;
 			ret = page_map(addr, addr, 1 /*npages*/, PG_GLOBAL);
 			kprintf("Map first page of module %d at 0x%lx (ret %d)\n", i, addr, ret);
 			kprintf("Module %d consists %zd\n", i, npages);

View file

@@ -194,13 +194,13 @@ void* page_alloc(size_t sz, uint32_t flags)
 {
 	size_t viraddr = 0;
 	size_t phyaddr;
-	uint32_t npages = PAGE_FLOOR(sz) >> PAGE_BITS;
+	uint32_t npages = PAGE_CEIL(sz) >> PAGE_BITS;
 	size_t pflags = PG_PRESENT|PG_GLOBAL|PG_XD;
 	if (BUILTIN_EXPECT(!npages, 0))
 		goto oom;
-	viraddr = vma_alloc(PAGE_FLOOR(sz), flags);
+	viraddr = vma_alloc(PAGE_CEIL(sz), flags);
 	if (BUILTIN_EXPECT(!viraddr, 0))
 		goto oom;
@@ -238,10 +238,10 @@ void page_free(void* viraddr, size_t sz)
 	phyaddr = virt_to_phys((size_t)viraddr);
-	vma_free((size_t) viraddr, (size_t) viraddr + PAGE_FLOOR(sz));
+	vma_free((size_t) viraddr, (size_t) viraddr + PAGE_CEIL(sz));
 	if (phyaddr)
-		put_pages(phyaddr, PAGE_FLOOR(sz) >> PAGE_BITS);
+		put_pages(phyaddr, PAGE_CEIL(sz) >> PAGE_BITS);
 }
 int memory_init(void)
@@ -267,13 +267,13 @@ int memory_init(void)
 		// mark first available memory slot as free
 		for(; mmap < mmap_end; mmap = (multiboot_memory_map_t*) ((size_t) mmap + sizeof(uint32_t) + mmap->size)) {
 			if (mmap->type == MULTIBOOT_MEMORY_AVAILABLE) {
-				start_addr = PAGE_FLOOR(mmap->addr);
-				end_addr = PAGE_CEIL(mmap->addr + mmap->len);
+				start_addr = PAGE_CEIL(mmap->addr);
+				end_addr = PAGE_FLOOR(mmap->addr + mmap->len);
 				LOG_INFO("Free region 0x%zx - 0x%zx\n", start_addr, end_addr);
 				if ((start_addr <= base) && (end_addr >= PAGE_2M_FLOOR((size_t) &kernel_start + image_size))) {
-					init_list.start = PAGE_2M_FLOOR((size_t) &kernel_start + image_size);
+					init_list.start = PAGE_2M_CEIL((size_t) &kernel_start + image_size);
 					init_list.end = end_addr;
 					LOG_INFO("Add region 0x%zx - 0x%zx\n", init_list.start, init_list.end);
@@ -295,13 +295,13 @@ int memory_init(void)
 		atomic_int64_add(&total_pages, (limit-base) >> PAGE_BITS);
 		atomic_int64_add(&total_available_pages, (limit-base) >> PAGE_BITS);
-		init_list.start = PAGE_2M_FLOOR(base + image_size);
+		init_list.start = PAGE_2M_CEIL(base + image_size);
 		init_list.end = limit;
 	}
 	// determine allocated memory, we use 2MB pages to map the kernel
-	atomic_int64_add(&total_allocated_pages, PAGE_2M_FLOOR(image_size) >> PAGE_BITS);
-	atomic_int64_sub(&total_available_pages, PAGE_2M_FLOOR(image_size) >> PAGE_BITS);
+	atomic_int64_add(&total_allocated_pages, PAGE_2M_CEIL(image_size) >> PAGE_BITS);
+	atomic_int64_sub(&total_available_pages, PAGE_2M_CEIL(image_size) >> PAGE_BITS);
 	LOG_INFO("free list starts at 0x%zx, limit 0x%zx\n", init_list.start, init_list.end);
@@ -324,10 +324,10 @@ int memory_init(void)
 		for(; mmap < mmap_end; mmap = (multiboot_memory_map_t*) ((size_t) mmap + sizeof(uint32_t) + mmap->size))
 		{
 			if (mmap->type == MULTIBOOT_MEMORY_AVAILABLE) {
-				start_addr = PAGE_FLOOR(mmap->addr);
-				end_addr = PAGE_CEIL(mmap->addr + mmap->len);
-				if ((start_addr <= base) && (end_addr >= PAGE_2M_FLOOR(base+image_size)))
+				start_addr = PAGE_CEIL(mmap->addr);
+				end_addr = PAGE_FLOOR(mmap->addr + mmap->len);
+				if ((start_addr <= base) && (end_addr >= PAGE_2M_CEIL(base+image_size)))
 					end_addr = base;
 				// ignore everything below 1M => reserve for I/O devices
@@ -335,11 +335,11 @@ int memory_init(void)
 					start_addr = GAP_BELOW;
 				if (start_addr < (size_t)mb_info)
-					start_addr = PAGE_FLOOR((size_t)mb_info);
+					start_addr = PAGE_CEIL((size_t)mb_info);
 				if ((mb_info->flags & MULTIBOOT_INFO_CMDLINE) && cmdline) {
 					if (start_addr < (size_t) cmdline+cmdsize)
-						start_addr = PAGE_FLOOR((size_t) cmdline+cmdsize);
+						start_addr = PAGE_CEIL((size_t) cmdline+cmdsize);
 				}
 				if (start_addr >= end_addr)
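
Two rounding idioms recur throughout this file: a byte count is converted to a page count by rounding up (PAGE_CEIL(sz) >> PAGE_BITS), while a free physical region reported by the bootloader is clamped to whole pages by rounding its start up and its end down. A small sketch restating these idioms with hypothetical helper names (not part of the HermitCore sources):

/* Illustrative helpers only; the names are hypothetical and the macros are
 * restated locally so the snippet compiles on its own. */
#include <stddef.h>

#define PAGE_BITS		12
#define PAGE_SIZE		(1L << PAGE_BITS)
#define PAGE_MASK		(~(PAGE_SIZE - 1))
#define PAGE_CEIL(addr)		(((addr) + PAGE_SIZE - 1) & PAGE_MASK)
#define PAGE_FLOOR(addr)	( (addr) & PAGE_MASK)

/* A byte count becomes a page count by rounding up, so a partial page
 * still gets a full frame (see page_alloc/page_free above). */
static size_t bytes_to_pages(size_t sz)
{
	return PAGE_CEIL(sz) >> PAGE_BITS;
}

/* A free region is shrunk to whole pages: start rounded up, end rounded
 * down, so no partially usable page is handed to the allocator. */
static void clamp_region(size_t addr, size_t len, size_t *start, size_t *end)
{
	*start = PAGE_CEIL(addr);
	*end = PAGE_FLOOR(addr + len);
}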

View file

@@ -370,11 +370,11 @@ err_t e1000if_init(struct netif* netif)
 	netif->state = e1000if;
 	mynetif = netif;
-	e1000if->bar0 = (uint8_t*) vma_alloc(PAGE_FLOOR(pci_info.size[0]), VMA_READ|VMA_WRITE);
+	e1000if->bar0 = (uint8_t*) vma_alloc(PAGE_CEIL(pci_info.size[0]), VMA_READ|VMA_WRITE);
 	if (BUILTIN_EXPECT(!e1000if->bar0, 0))
 		goto oom;
-	int ret = page_map((size_t)e1000if->bar0, PAGE_CEIL(pci_info.base[0]), PAGE_FLOOR(pci_info.size[0]) >> PAGE_BITS, PG_GLOBAL|PG_RW|PG_PCD);
+	int ret = page_map((size_t)e1000if->bar0, PAGE_FLOOR(pci_info.base[0]), PAGE_CEIL(pci_info.size[0]) >> PAGE_BITS, PG_GLOBAL|PG_RW|PG_PCD);
 	if (BUILTIN_EXPECT(ret, 0))
 		goto oom;

View file

@@ -594,7 +594,7 @@ err_t mmnif_init(struct netif *netif)
 		goto out;
 	}
-	err = vma_add((size_t)header_start_address, PAGE_FLOOR((size_t)header_start_address + ((nodes * header_size) >> PAGE_BITS)), VMA_READ|VMA_WRITE|VMA_CACHEABLE);
+	err = vma_add((size_t)header_start_address, PAGE_CEIL((size_t)header_start_address + ((nodes * header_size) >> PAGE_BITS)), VMA_READ|VMA_WRITE|VMA_CACHEABLE);
 	if (BUILTIN_EXPECT(err, 0)) {
 		LOG_ERROR("mmnif init(): vma_add failed for header_start_address %p\n", header_start_address);
 		goto out;
@@ -620,7 +620,7 @@ err_t mmnif_init(struct netif *netif)
 		goto out;
 	}
-	err = vma_add((size_t)heap_start_address, PAGE_FLOOR((size_t)heap_start_address + ((nodes * heap_size) >> PAGE_BITS)), VMA_READ|VMA_WRITE|VMA_CACHEABLE);
+	err = vma_add((size_t)heap_start_address, PAGE_CEIL((size_t)heap_start_address + ((nodes * heap_size) >> PAGE_BITS)), VMA_READ|VMA_WRITE|VMA_CACHEABLE);
 	if (BUILTIN_EXPECT(!heap_start_address, 0))
 	{
 		LOG_ERROR("mmnif init(): vma_add failed for heap_start_address %p\n", heap_start_address);

View file

@@ -48,7 +48,7 @@ extern const size_t image_size;
 #define TIMER_FREQ	100 /* in HZ */
 #define CLOCK_TICK_RATE	1193182 /* 8254 chip's internal oscillator frequency */
 #define CACHE_LINE	64
-#define HEAP_START	(PAGE_2M_FLOOR((size_t)&kernel_start + image_size) + 4*PAGE_SIZE)
+#define HEAP_START	(PAGE_2M_CEIL((size_t)&kernel_start + image_size) + 4*PAGE_SIZE)
 #define HEAP_SIZE	(1ULL << 32)
 #define KMSG_SIZE	0x1000
 #define INT_SYSCALL	0x80

View file

@@ -342,8 +342,8 @@ static int initd(void* arg)
 	}
 	curr_task->heap->flags = VMA_HEAP|VMA_USER;
-	curr_task->heap->start = PAGE_FLOOR(heap);
-	curr_task->heap->end = PAGE_FLOOR(heap);
+	curr_task->heap->start = PAGE_CEIL(heap);
+	curr_task->heap->end = PAGE_CEIL(heap);
 	// region is already reserved for the heap, we have to change the
 	// property of the first page

View file

@@ -290,11 +290,11 @@ ssize_t sys_sbrk(ssize_t incr)
 		heap->end += incr;
 		// reserve VMA regions
-		if (PAGE_CEIL(heap->end) > PAGE_CEIL(ret)) {
+		if (PAGE_FLOOR(heap->end) > PAGE_FLOOR(ret)) {
 			// region is already reserved for the heap, we have to change the
 			// property
-			vma_free(PAGE_CEIL(ret), PAGE_FLOOR(heap->end));
-			vma_add(PAGE_CEIL(ret), PAGE_FLOOR(heap->end), VMA_HEAP|VMA_USER);
+			vma_free(PAGE_FLOOR(ret), PAGE_CEIL(heap->end));
+			vma_add(PAGE_FLOOR(ret), PAGE_CEIL(heap->end), VMA_HEAP|VMA_USER);
 		}
 	} else ret = -ENOMEM;
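
The condition in this hunk re-registers the heap VMA only when the new break has moved into a later page than the old one; as long as both breaks fall inside the same page, nothing changes. A tiny sketch of that check, using hypothetical names (old_brk/new_brk) and the macro restated locally:

/* Hypothetical helper restating the check from sys_sbrk above; the macro is
 * repeated so the snippet stands alone. */
#include <stddef.h>

#define PAGE_SIZE		4096L
#define PAGE_MASK		(~(PAGE_SIZE - 1))
#define PAGE_FLOOR(addr)	((addr) & PAGE_MASK)

/* Growing the break only needs new VMA pages when it crosses into a page
 * beyond the one containing the old break. */
static int brk_crossed_page(size_t old_brk, size_t new_brk)
{
	return PAGE_FLOOR(new_brk) > PAGE_FLOOR(old_brk);
}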

View file

@@ -134,13 +134,13 @@ void buddy_dump(void)
 void* palloc(size_t sz, uint32_t flags)
 {
 	size_t phyaddr, viraddr, bits;
-	uint32_t npages = PAGE_FLOOR(sz) >> PAGE_BITS;
+	uint32_t npages = PAGE_CEIL(sz) >> PAGE_BITS;
 	int err;
 	LOG_DEBUG("palloc(%zd) (%u pages)\n", sz, npages);
 	// get free virtual address space
-	viraddr = vma_alloc(PAGE_FLOOR(sz), flags);
+	viraddr = vma_alloc(PAGE_CEIL(sz), flags);
 	if (BUILTIN_EXPECT(!viraddr, 0))
 		return NULL;
@@ -168,7 +168,7 @@ void* palloc(size_t sz, uint32_t flags)
 void* create_stack(size_t sz)
 {
 	size_t phyaddr, viraddr, bits;
-	uint32_t npages = PAGE_FLOOR(sz) >> PAGE_BITS;
+	uint32_t npages = PAGE_CEIL(sz) >> PAGE_BITS;
 	int err;
 	LOG_DEBUG("create_stack(0x%zx) (%u pages)\n", DEFAULT_STACK_SIZE, npages);
@@ -204,7 +204,7 @@ void* create_stack(size_t sz)
 int destroy_stack(void* viraddr, size_t sz)
 {
 	size_t phyaddr;
-	uint32_t npages = PAGE_FLOOR(sz) >> PAGE_BITS;
+	uint32_t npages = PAGE_CEIL(sz) >> PAGE_BITS;
 	LOG_DEBUG("destroy_stack(0x%zx) (size 0x%zx)\n", viraddr, DEFAULT_STACK_SIZE);

View file

@@ -54,12 +54,12 @@ int vma_init(void)
 	int ret;
 	LOG_INFO("vma_init: reserve vma region 0x%llx - 0x%llx\n",
-		PAGE_2M_CEIL((size_t) &kernel_start),
-		PAGE_2M_FLOOR((size_t) &kernel_start + image_size));
+		PAGE_2M_FLOOR((size_t) &kernel_start),
+		PAGE_2M_CEIL((size_t) &kernel_start + image_size));
 	// add Kernel
-	ret = vma_add(PAGE_2M_CEIL((size_t) &kernel_start),
-		PAGE_2M_FLOOR((size_t) &kernel_start + image_size),
+	ret = vma_add(PAGE_2M_FLOOR((size_t) &kernel_start),
+		PAGE_2M_CEIL((size_t) &kernel_start + image_size),
 		VMA_READ|VMA_WRITE|VMA_EXECUTE|VMA_CACHEABLE);
 	if (BUILTIN_EXPECT(ret, 0))
 		goto out;