From 82d5ec690e1b0905029086ef156243ebbf7e2feb Mon Sep 17 00:00:00 2001
From: Stefan Lankes
Date: Thu, 26 May 2016 10:04:45 +0200
Subject: [PATCH] add function for the stack creation, protect stack by the NX flags

---
 hermit/arch/x86/kernel/tasks.c |  1 -
 hermit/include/hermit/stdlib.h |  8 +++++
 hermit/kernel/tasks.c          | 21 ++++--------
 hermit/mm/malloc.c             | 62 ++++++++++++++++++++++++++++++++--
 4 files changed, 75 insertions(+), 17 deletions(-)

diff --git a/hermit/arch/x86/kernel/tasks.c b/hermit/arch/x86/kernel/tasks.c
index dfe0dbdd3..378a069c6 100644
--- a/hermit/arch/x86/kernel/tasks.c
+++ b/hermit/arch/x86/kernel/tasks.c
@@ -97,7 +97,6 @@ int create_default_frame(task_t* task, entry_point_t ep, void* arg, uint32_t cor
 	 * This is the stack which will be activated and popped off for iret later.
 	 */
 	stack = (size_t*) (((size_t) task->stack + DEFAULT_STACK_SIZE - sizeof(size_t)) & ~0x1F);	// => stack is 32byte aligned
-	stack += sizeof(size_t);
 
 	/* Only marker for debugging purposes, ... */
 	*stack-- = 0xDEADBEEF;
diff --git a/hermit/include/hermit/stdlib.h b/hermit/include/hermit/stdlib.h
index a29f08b5a..356669662 100644
--- a/hermit/include/hermit/stdlib.h
+++ b/hermit/include/hermit/stdlib.h
@@ -71,6 +71,14 @@ void* palloc(size_t sz, uint32_t flags);
  */
 void* kmalloc(size_t sz);
 
+/** @brief Create a stack with guard pages
+ */
+void* create_stack(void);
+
+/** @brief Destroy stack with its guard pages
+ */
+int destroy_stack(void* addr);
+
 /** @brief Release memory back to the buddy system
  *
  * Every block of memory allocated by kmalloc() is prefixed with a buddy_t
diff --git a/hermit/kernel/tasks.c b/hermit/kernel/tasks.c
index 1857a72cc..73a76507d 100644
--- a/hermit/kernel/tasks.c
+++ b/hermit/kernel/tasks.c
@@ -213,7 +213,8 @@ void finish_task_switch(void)
 	if (old->status == TASK_FINISHED) {
 		/* cleanup task */
 		if (old->stack) {
-			kfree(old->stack);
+			kprintf("Release stack at 0x%zx\n", old->stack);
+			destroy_stack(old->stack);
 			old->stack = NULL;
 		}
 
@@ -333,14 +334,10 @@ int clone_task(tid_t* id, entry_point_t ep, void* arg, uint8_t prio)
 
 	curr_task = per_core(current_task);
 
-	stack = kmalloc(DEFAULT_STACK_SIZE + PAGE_SIZE);
+	stack = create_stack();
 	if (BUILTIN_EXPECT(!stack, 0))
 		return -ENOMEM;
 
-	// unmap the first page to detect a stack overflow
-	page_unmap((size_t)stack, 1);
-	stack = (void*) ((size_t) stack + PAGE_SIZE);
-
 	spinlock_irqsave_lock(&table_lock);
 
 	core_id = get_next_core_id();
@@ -395,7 +392,7 @@ int clone_task(tid_t* id, entry_point_t ep, void* arg, uint8_t prio)
 
 out:
 	if (ret)
-		kfree(stack);
+		destroy_stack(stack);
 
 #if 0
 	if (core_id != CORE_ID)
@@ -423,17 +420,13 @@ int create_task(tid_t* id, entry_point_t ep, void* arg, uint8_t prio, uint32_t c
 	if (BUILTIN_EXPECT(!readyqueues[core_id].idle, 0))
 		return -EINVAL;
 
-	stack = kmalloc(DEFAULT_STACK_SIZE + PAGE_SIZE);
+	stack = create_stack();
 	if (BUILTIN_EXPECT(!stack, 0))
 		return -ENOMEM;
 
-	// unmap the first page to detect a stack overflow
-	page_unmap((size_t)stack, 1);
-	stack = (void*) ((size_t) stack + PAGE_SIZE);
-
 	counter = kmalloc(sizeof(atomic_int64_t));
 	if (BUILTIN_EXPECT(!counter, 0)) {
-		kfree(stack);
+		destroy_stack(stack);
 		return -ENOMEM;
 	}
 	atomic_int64_set((atomic_int64_t*) counter, 0);
@@ -489,7 +482,7 @@ out:
 	spinlock_irqsave_unlock(&table_lock);
 
 	if (ret) {
-		kfree(stack);
+		destroy_stack(stack);
 		kfree(counter);
 	}
 
diff --git a/hermit/mm/malloc.c b/hermit/mm/malloc.c
index 9e1aeba31..5c30fbe97 100644
--- a/hermit/mm/malloc.c
+++ b/hermit/mm/malloc.c
@@ -78,7 +78,7 @@ static buddy_t* buddy_get(int exp)
 	else if ((exp >= BUDDY_ALLOC) && !buddy_large_avail(exp))
 		// theres no free buddy larger than exp =>
 		// we can allocate new memory
 		buddy = (buddy_t*) palloc(1<<exp, 0);
[...]
+void* create_stack(void)
+{
+	size_t viraddr, phyaddr, bits;
+	uint32_t npages = PAGE_FLOOR(DEFAULT_STACK_SIZE) >> PAGE_BITS;
+	int err;
+
+	//kprintf("create_stack(0x%zx) (%lu pages)\n", DEFAULT_STACK_SIZE, npages);
+
+	// get free virtual address space
+	viraddr = vma_alloc((npages+2)*PAGE_SIZE, VMA_READ|VMA_WRITE|VMA_CACHEABLE);
+	if (BUILTIN_EXPECT(!viraddr, 0))
+		return NULL;
+
+	// get continuous physical pages
+	phyaddr = get_pages(npages);
+	if (BUILTIN_EXPECT(!phyaddr, 0)) {
+		vma_free(viraddr, viraddr+(npages+2)*PAGE_SIZE);
+		return NULL;
+	}
+
+	bits = PG_RW|PG_GLOBAL;
+	// protect the stack by the NX flag
+	if (has_nx())
+		bits |= PG_XD;
+
+	// map physical pages to VMA
+	err = page_map(viraddr+PAGE_SIZE, phyaddr, npages, bits);
+	if (BUILTIN_EXPECT(err, 0)) {
+		vma_free(viraddr, viraddr+(npages+2)*PAGE_SIZE);
+		put_pages(phyaddr, npages);
+		return NULL;
+	}
+
+	return (void*) (viraddr+PAGE_SIZE);
+}
+
+int destroy_stack(void* viraddr)
+{
+	size_t phyaddr;
+	uint32_t npages = PAGE_FLOOR(DEFAULT_STACK_SIZE) >> PAGE_BITS;
+
+	//kprintf("destroy_stack(0x%zx) (size 0x%zx)\n", viraddr, DEFAULT_STACK_SIZE);
+
+	if (BUILTIN_EXPECT(!viraddr, 0))
+		return -ENOMEM;
+
+	phyaddr = virt_to_phys((size_t)viraddr);
+	if (BUILTIN_EXPECT(!phyaddr, 0))
+		return -ENOMEM;
+
+	// unmap and destroy stack
+	vma_free((size_t)viraddr-PAGE_SIZE, (size_t)viraddr+(npages+1)*PAGE_SIZE);
+	page_unmap((size_t)viraddr, npages);
+	put_pages(phyaddr, npages);
+
+	return 0;
+}
+
 void* kmalloc(size_t sz)
 {
 	if (BUILTIN_EXPECT(!sz, 0))
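A note on the layout the patch builds: create_stack() reserves npages+2 pages
of virtual address space but backs only the middle npages with physical
memory, so the usable stack is fenced by an unmapped guard page on each side:

  viraddr          viraddr+PAGE_SIZE                 viraddr+(npages+1)*PAGE_SIZE
  |  guard page   |  stack (npages, PG_RW [+PG_XD])  |  guard page   |
  |  (unmapped)   |                                  |  (unmapped)   |

Any access to a guard page faults immediately, so an overflowing or
underflowing stack is caught at the offending instruction instead of silently
corrupting a neighbouring heap block; the previous kmalloc()-based scheme only
unmapped the page below the stack. Setting PG_XD (execute-disable) on CPUs
with NX support additionally keeps data written to the stack from ever being
run as code.

The same layout can be reproduced outside the kernel with POSIX
mmap()/mprotect(), which is handy for experimenting with the technique. The
sketch below is illustrative only: demo_create_stack(), demo_destroy_stack(),
STACK_SIZE and the file name are made-up stand-ins for the kernel's
create_stack(), destroy_stack() and DEFAULT_STACK_SIZE, and never setting
PROT_EXEC plays the role of the PG_XD bit here.

#define _GNU_SOURCE
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#define STACK_SIZE (8 * 4096)	/* stand-in for DEFAULT_STACK_SIZE */

/* Reserve stack + 2 guard pages; only the middle region is accessible.
 * PROT_EXEC is never set, mirroring the PG_XD (execute-disable) bit. */
static void* demo_create_stack(size_t page_size)
{
	size_t total = STACK_SIZE + 2 * page_size;

	/* reserve the whole range with no access (both guards included) */
	char* base = mmap(NULL, total, PROT_NONE,
	                  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (base == MAP_FAILED)
		return NULL;

	/* open up the usable stack area: readable + writable, not executable */
	if (mprotect(base + page_size, STACK_SIZE, PROT_READ | PROT_WRITE) != 0) {
		munmap(base, total);
		return NULL;
	}

	return base + page_size;	/* like create_stack(): skip the lower guard */
}

static int demo_destroy_stack(void* stack, size_t page_size)
{
	if (!stack)
		return -EINVAL;
	/* release the guards together with the stack, as destroy_stack() does */
	return munmap((char*)stack - page_size, STACK_SIZE + 2 * page_size);
}

int main(void)
{
	size_t page_size = (size_t)sysconf(_SC_PAGESIZE);
	char* stack = demo_create_stack(page_size);
	if (!stack) {
		perror("demo_create_stack");
		return EXIT_FAILURE;
	}

	memset(stack, 0xAB, STACK_SIZE);	/* the stack itself is usable    */
	/* stack[-1] = 0; */			/* would fault: lower guard page */
	/* stack[STACK_SIZE] = 0; */		/* would fault: upper guard page */

	printf("stack at %p, guards at %p and %p\n",
	       (void*)stack, (void*)(stack - page_size),
	       (void*)(stack + STACK_SIZE));

	return demo_destroy_stack(stack, page_size) ? EXIT_FAILURE : EXIT_SUCCESS;
}

Build it with something like "gcc -o stackdemo stackdemo.c"; uncommenting one
of the two guard-page writes makes the process die with SIGSEGV at exactly the
faulting access, which is the behaviour the patch buys for kernel stacks.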