1
0
Fork 0
mirror of https://github.com/hermitcore/libhermit.git synced 2025-03-09 00:00:03 +01:00

add function for the stack creation, protect stack by the NX flags

This commit is contained in:
Stefan Lankes 2016-05-26 10:04:45 +02:00
parent 82017fe436
commit 82d5ec690e
4 changed files with 75 additions and 17 deletions

View file

@ -97,7 +97,6 @@ int create_default_frame(task_t* task, entry_point_t ep, void* arg, uint32_t cor
* This is the stack which will be activated and popped off for iret later.
*/
stack = (size_t*) (((size_t) task->stack + DEFAULT_STACK_SIZE - sizeof(size_t)) & ~0x1F); // => stack is 32byte aligned
stack += sizeof(size_t);
/* Only marker for debugging purposes, ... */
*stack-- = 0xDEADBEEF;

View file

@ -71,6 +71,14 @@ void* palloc(size_t sz, uint32_t flags);
*/
void* kmalloc(size_t sz);
/** @brief Create a stack with guard pages
*/
void* create_stack(void);
/** @brief Destroy stack with its guard pages
*/
int destroy_stack(void* addr);
/** @brief Release memory back to the buddy system
*
* Every block of memory allocated by kmalloc() is prefixed with a buddy_t

View file

@ -213,7 +213,8 @@ void finish_task_switch(void)
if (old->status == TASK_FINISHED) {
/* cleanup task */
if (old->stack) {
kfree(old->stack);
kprintf("Release stack at 0x%zx\n", old->stack);
destroy_stack(old->stack);
old->stack = NULL;
}
@ -333,14 +334,10 @@ int clone_task(tid_t* id, entry_point_t ep, void* arg, uint8_t prio)
curr_task = per_core(current_task);
stack = kmalloc(DEFAULT_STACK_SIZE + PAGE_SIZE);
stack = create_stack();
if (BUILTIN_EXPECT(!stack, 0))
return -ENOMEM;
// unmap the first page to detect a stack overflow
page_unmap((size_t)stack, 1);
stack = (void*) ((size_t) stack + PAGE_SIZE);
spinlock_irqsave_lock(&table_lock);
core_id = get_next_core_id();
@ -395,7 +392,7 @@ int clone_task(tid_t* id, entry_point_t ep, void* arg, uint8_t prio)
out:
if (ret)
kfree(stack);
destroy_stack(stack);
#if 0
if (core_id != CORE_ID)
@ -423,17 +420,13 @@ int create_task(tid_t* id, entry_point_t ep, void* arg, uint8_t prio, uint32_t c
if (BUILTIN_EXPECT(!readyqueues[core_id].idle, 0))
return -EINVAL;
stack = kmalloc(DEFAULT_STACK_SIZE + PAGE_SIZE);
stack = create_stack();
if (BUILTIN_EXPECT(!stack, 0))
return -ENOMEM;
// unmap the first page to detect a stack overflow
page_unmap((size_t)stack, 1);
stack = (void*) ((size_t) stack + PAGE_SIZE);
counter = kmalloc(sizeof(atomic_int64_t));
if (BUILTIN_EXPECT(!counter, 0)) {
kfree(stack);
destroy_stack(stack);
return -ENOMEM;
}
atomic_int64_set((atomic_int64_t*) counter, 0);
@ -489,7 +482,7 @@ out:
spinlock_irqsave_unlock(&table_lock);
if (ret) {
kfree(stack);
destroy_stack(stack);
kfree(counter);
}

View file

@ -78,7 +78,7 @@ static buddy_t* buddy_get(int exp)
else if ((exp >= BUDDY_ALLOC) && !buddy_large_avail(exp))
// there's no free buddy larger than exp =>
// we can allocate new memory
buddy = (buddy_t*) palloc(1<<exp, VMA_HEAP);
buddy = (buddy_t*) palloc(1<<exp, VMA_HEAP|VMA_CACHEABLE);
else {
// we recursively request a larger buddy...
buddy = buddy_get(exp+1);
@ -139,7 +139,7 @@ void* palloc(size_t sz, uint32_t flags)
//kprintf("palloc(%lu) (%lu pages)\n", sz, npages);
// get free virtual address space
viraddr = vma_alloc(npages*PAGE_SIZE, flags);
viraddr = vma_alloc(PAGE_FLOOR(sz), flags);
if (BUILTIN_EXPECT(!viraddr, 0))
return NULL;
@ -167,6 +167,64 @@ void* palloc(size_t sz, uint32_t flags)
return (void*) viraddr;
}
/** @brief Allocate a kernel stack of DEFAULT_STACK_SIZE bytes, surrounded
 * by two unmapped guard pages.
 *
 * Reserves npages+2 pages of virtual address space but backs only the
 * middle npages with physical memory; the first and last page stay
 * unmapped, so a stack overflow or underflow faults instead of silently
 * corrupting adjacent memory. The pages are mapped non-executable when
 * the CPU supports the NX/XD bit.
 *
 * @return Virtual address of the usable stack region (just above the
 *         lower guard page), or NULL on failure. Release with
 *         destroy_stack().
 */
void* create_stack(void)
{
size_t phyaddr, viraddr, bits;
uint32_t npages = PAGE_FLOOR(DEFAULT_STACK_SIZE) >> PAGE_BITS;
int err;
//kprintf("create_stack(0x%zx) (%lu pages)\n", DEFAULT_STACK_SIZE, npages);
// reserve virtual address space for the stack plus one guard page below
// and one above (npages+2 total); the guard pages are never mapped
viraddr = vma_alloc((npages+2)*PAGE_SIZE, VMA_READ|VMA_WRITE|VMA_CACHEABLE);
if (BUILTIN_EXPECT(!viraddr, 0))
return NULL;
// get contiguous physical pages to back the stack
phyaddr = get_pages(npages);
if (BUILTIN_EXPECT(!phyaddr, 0)) {
// roll back the VMA reservation on failure
vma_free(viraddr, viraddr+(npages+2)*PAGE_SIZE);
return NULL;
}
bits = PG_RW|PG_GLOBAL;
// protect the stack by the NX flag (no code execution from stack pages)
if (has_nx())
bits |= PG_XD;
// map the physical pages starting just above the lower guard page
err = page_map(viraddr+PAGE_SIZE, phyaddr, npages, bits);
if (BUILTIN_EXPECT(err, 0)) {
// roll back both the VMA reservation and the physical pages
vma_free(viraddr, viraddr+(npages+2)*PAGE_SIZE);
put_pages(phyaddr, npages);
return NULL;
}
// hand back the first usable address, i.e. past the lower guard page
return (void*) (viraddr+PAGE_SIZE);
}
/** @brief Release a stack previously created by create_stack().
 *
 * @param viraddr The value returned by create_stack(), i.e. the first
 *        usable stack address just above the lower guard page.
 * @return 0 on success, -ENOMEM if viraddr is NULL or not mapped.
 */
int destroy_stack(void* viraddr)
{
size_t phyaddr;
// must mirror the page count used by create_stack()
uint32_t npages = PAGE_FLOOR(DEFAULT_STACK_SIZE) >> PAGE_BITS;
//kprintf("destroy_stack(0x%zx) (size 0x%zx)\n", viraddr, DEFAULT_STACK_SIZE);
if (BUILTIN_EXPECT(!viraddr, 0))
return -ENOMEM;
// resolve the backing physical address before tearing down the mapping
phyaddr = virt_to_phys((size_t)viraddr);
if (BUILTIN_EXPECT(!phyaddr, 0))
return -ENOMEM;
// release the full VMA reservation including both guard pages
// (viraddr-PAGE_SIZE is the lower guard; npages+1 pages cover the
// mapped stack plus the upper guard), then unmap and free the
// physical pages
// NOTE(review): vma_free runs before page_unmap here — presumably safe
// because the VMA bookkeeping is independent of the page tables; confirm
vma_free((size_t)viraddr-PAGE_SIZE, (size_t)viraddr+(npages+1)*PAGE_SIZE);
page_unmap((size_t)viraddr, npages);
put_pages(phyaddr, npages);
return 0;
}
void* kmalloc(size_t sz)
{
if (BUILTIN_EXPECT(!sz, 0))