diff --git a/hermit/arch/x86/include/asm/page.h b/hermit/arch/x86/include/asm/page.h
index 134b04603..7a860c0ff 100644
--- a/hermit/arch/x86/include/asm/page.h
+++ b/hermit/arch/x86/include/asm/page.h
@@ -187,15 +187,4 @@ int page_unmap(size_t viraddr, size_t npages);
  */
 int page_set_flags(size_t viraddr, uint32_t npages, int flags);
 
-/** @brief Copy a whole page map tree
- *
- * @param dest Physical address of new page map
- * @retval 0 Success. Everything went fine.
- * @retval <0 Error. Something went wrong.
- */
-int page_map_copy(struct task *dest);
-
-/** @brief Free a whole page map tree */
-int page_map_drop(void);
-
 #endif
diff --git a/hermit/arch/x86/kernel/apic.c b/hermit/arch/x86/kernel/apic.c
index 7afc91eda..117b115c8 100644
--- a/hermit/arch/x86/kernel/apic.c
+++ b/hermit/arch/x86/kernel/apic.c
@@ -381,7 +381,7 @@ static int lapic_reset(void)
  */
int apic_calibration(void)
 {
-	uint32_t i;
+	//uint32_t i;
 	uint32_t flags;
 	uint64_t ticks, old;
 
@@ -562,11 +562,13 @@ found_mp:
 			apic_io_entry_t* io_entry = (apic_io_entry_t*) addr;
 			ioapic = (ioapic_t*) ((size_t) io_entry->addr);
 			kprintf("Found IOAPIC at 0x%x\n", ioapic);
+#if 0
 			page_map(IOAPIC_ADDR, (size_t)ioapic & PAGE_MASK, 1, PG_GLOBAL | PG_RW | PG_PCD);
 			vma_add(IOAPIC_ADDR, IOAPIC_ADDR + PAGE_SIZE, VMA_READ|VMA_WRITE);
 			ioapic = (ioapic_t*) IOAPIC_ADDR;
-			addr += 8;
 			kprintf("Map IOAPIC to 0x%x\n", ioapic);
+#endif
+			addr += 8;
 		} else if (*((uint8_t*) addr) == 3) { // IO_INT
 			apic_ioirq_entry_t* extint = (apic_ioirq_entry_t*) addr;
 			if (extint->src_bus == isa_bus) {
@@ -881,6 +883,7 @@ int apic_init(void)
 	return 0;
 }
 
+#if 0
 int ioapic_inton(uint8_t irq, uint8_t apicid)
 {
 	ioapic_route_t route;
@@ -954,3 +957,4 @@ int ioapic_intoff(uint8_t irq, uint8_t apicid)
 
 	return 0;
 }
+#endif
diff --git a/hermit/arch/x86/kernel/tasks.c b/hermit/arch/x86/kernel/tasks.c
index 976a7093f..0382759bf 100644
--- a/hermit/arch/x86/kernel/tasks.c
+++ b/hermit/arch/x86/kernel/tasks.c
@@ -46,63 +46,14 @@ extern const void percore_end0;
 
 extern uint64_t base;
 
-static inline void enter_user_task(size_t ep, size_t stack)
-{
-	// don't interrupt the jump to user-level code
-	irq_disable();
-
-	asm volatile ("swapgs" ::: "memory");
-
-	// the jump also enable interrupts
-	jump_to_user_code(ep, stack);
-}
-
 static int thread_entry(void* arg, size_t ep)
 {
-#if 0
-	size_t addr, stack = 0;
-	size_t flags;
-	int64_t npages;
-	size_t offset = DEFAULT_STACK_SIZE-16;
-
-	//create user-level stack
-	npages = DEFAULT_STACK_SIZE >> PAGE_BITS;
-	if (DEFAULT_STACK_SIZE & (PAGE_SIZE-1))
-		npages++;
-
-	addr = get_pages(npages);
-	if (BUILTIN_EXPECT(!addr, 0)) {
-		kprintf("load_task: not enough memory!\n");
-		return -ENOMEM;
-	}
-
-	stack = (1ULL << 34ULL) - curr_task->id*DEFAULT_STACK_SIZE-PAGE_SIZE; // virtual address of the stack
-	flags = PG_USER|PG_RW;
-	if (has_nx())
-		flags |= PG_XD;
-
-	if (page_map(stack, addr, npages, flags)) {
-		put_pages(addr, npages);
-		kprintf("Could not map stack at 0x%x\n", stack);
-		return -ENOMEM;
-	}
-	memset((void*) stack, 0x00, npages*PAGE_SIZE);
-	//kprintf("stack located at 0x%zx (0x%zx)\n", stack, addr);
-
-	// create vma regions for the user-level stack
-	flags = VMA_CACHEABLE|VMA_USER|VMA_READ|VMA_WRITE;
-	vma_add(stack, stack+npages*PAGE_SIZE-1, flags);
-#endif
 
 	if (init_tls())
 		return -ENOMEM;
 
 	//vma_dump();
 
-	// set first argument
-	//asm volatile ("mov %0, %%rdi" :: "r"(arg));
-	//enter_user_task(ep, stack+offset);
-
 	entry_point_t call_ep = (entry_point_t) ep;
 	call_ep(arg);
 
@@ -122,12 +73,6 @@ size_t* get_current_stack(void)
 	set_per_core(kernel_stack, stptr);
 	tss_set_rsp0(stptr);
 
-#if 0
-	// do we change the address space?
-	if (read_cr3() != curr_task->page_map)
-		write_cr3(curr_task->page_map); // use new page table
-#endif
-
 	return curr_task->last_stack_pointer;
 }
 
@@ -176,12 +121,9 @@ int create_default_frame(task_t* task, entry_point_t ep, void* arg, uint32_t core_id)
 
 	/* The instruction pointer shall be set on the first function to be called
 	 * after IRETing */
-	//if ((size_t) ep < KERNEL_SPACE) {
-	//	stptr->rip = (size_t)ep;
-	//} else {
-		stptr->rip = (size_t)thread_entry;
-		stptr->rsi = (size_t)ep; // use second argument to transfer the entry point
-	//}
+	stptr->rip = (size_t)thread_entry;
+	stptr->rsi = (size_t)ep; // use second argument to transfer the entry point
+
 	stptr->cs = 0x08;
 	stptr->ss = 0x10;
 	stptr->gs = core_id * ((size_t) &percore_end0 - (size_t) &percore_start);
diff --git a/hermit/arch/x86/mm/page.c b/hermit/arch/x86/mm/page.c
index bed308c73..a40a18236 100644
--- a/hermit/arch/x86/mm/page.c
+++ b/hermit/arch/x86/mm/page.c
@@ -52,8 +52,8 @@ extern const void kernel_start;
 /// This page is reserved for copying
 #define PAGE_TMP	(PAGE_FLOOR((size_t) &kernel_start) - PAGE_SIZE)
 
-/** Lock for kernel space page tables */
-static spinlock_t kslock = SPINLOCK_INIT;
+/** Single-address space operating system => one lock for all tasks */
+static spinlock_irqsave_t page_lock = SPINLOCK_IRQSAVE_INIT;
 
 /** This PGD table is initialized in entry.asm */
 extern size_t* boot_map;
@@ -119,11 +119,7 @@ int page_map(size_t viraddr, size_t phyaddr, size_t npages, size_t bits)
 
 	curr_task = per_core(current_task);
 
-	/** @todo: might not be sufficient! */
-	if (bits & PG_USER)
-		spinlock_irqsave_lock(curr_task->page_lock);
-	else
-		spinlock_lock(&kslock);
+	spinlock_irqsave_lock(&page_lock);
 
 	/* Start iterating through the entries
 	 * beginning at the root table (PGD or PML4) */
@@ -172,117 +168,29 @@ int page_map(size_t viraddr, size_t phyaddr, size_t npages, size_t bits)
 	ret = 0;
 out:
-	if (bits & PG_USER)
-		spinlock_irqsave_unlock(curr_task->page_lock);
-	else
-		spinlock_unlock(&kslock);
+	spinlock_irqsave_unlock(&page_lock);
 
 	return ret;
 }
 
-/** Tables are freed by page_map_drop() */
 int page_unmap(size_t viraddr, size_t npages)
 {
-	task_t* curr_task = per_core(current_task);
-
-	/* We aquire both locks for kernel and task tables
-	 * as we dont know to which the region belongs. */
-	spinlock_irqsave_lock(curr_task->page_lock);
-	spinlock_lock(&kslock);
+	spinlock_irqsave_lock(&page_lock);
 
 	/* Start iterating through the entries.
 	 * Only the PGT entries are removed. Tables remain allocated.
 	 */
 	size_t vpn, start = viraddr>>PAGE_BITS;
 	for (vpn=start; vpn<start+npages; vpn++)
 		self[0][vpn] = 0;
 
-	spinlock_irqsave_unlock(curr_task->page_lock);
-	spinlock_unlock(&kslock);
+	spinlock_irqsave_unlock(&page_lock);
 
 	/* This can't fail because we don't make checks here */
 	return 0;
 }
 
-int page_map_drop(void)
-{
-	task_t* curr_task = per_core(current_task);
-
-	void traverse(int lvl, long vpn) {
-		long stop;
-		for (stop=vpn+PAGE_MAP_ENTRIES; vpn<stop; vpn++) {
-			if ((self[lvl][vpn] & PG_PRESENT) && (self[lvl][vpn] & PG_USER)) {
-				/* Post-order traversal */
-				if (lvl)
-					traverse(lvl-1, vpn<<PAGE_MAP_BITS);
-
-				put_pages(self[lvl][vpn] & PAGE_MASK, 1);
-				atomic_int64_dec(curr_task->user_usage);
-			}
-		}
-	}
-
-	spinlock_irqsave_lock(curr_task->page_lock);
-
-	traverse(PAGE_LEVELS-1, 0);
-
-	spinlock_irqsave_unlock(curr_task->page_lock);
-
-	/* This can't fail because we don't make checks here */
-	return 0;
-}
-
-int page_map_copy(task_t *dest)
-{
-	task_t* curr_task = per_core(current_task);
-
-	int traverse(int lvl, long vpn) {
-		long stop;
-		for (stop=vpn+PAGE_MAP_ENTRIES; vpn<stop; vpn++) {
-			if (self[lvl][vpn] & PG_PRESENT) {
-				if (self[lvl][vpn] & PG_USER) {
-					size_t phyaddr = get_page();
-					if (BUILTIN_EXPECT(!phyaddr, 0))
-						return -ENOMEM;
-
-					atomic_int64_inc(dest->user_usage);
-
-					other[lvl][vpn] = phyaddr | (self[lvl][vpn] & ~PAGE_MASK);
-					if (lvl) /* PML4, PDPT, PGD */
-						traverse(lvl-1, vpn<<PAGE_MAP_BITS);
-					else /* PGT */
-						copy_page(phyaddr, self[lvl][vpn] & PAGE_MASK);
-				}
-				else
-					other[lvl][vpn] = self[lvl][vpn];
-			}
-			else
-				other[lvl][vpn] = 0;
-		}
-
-		return 0;
-	}
-
-	dest->page_map |= PG_PRESENT;
-
-	spinlock_irqsave_lock(curr_task->page_lock);
-	self[PAGE_LEVELS-1][PAGE_MAP_ENTRIES-2] = dest->page_map | PG_PRESENT | PG_SELF | PG_ACCESSED | PG_RW;
-
-	int ret = traverse(PAGE_LEVELS-1, 0);
-
-	other[PAGE_LEVELS-1][PAGE_MAP_ENTRIES-1] = dest->page_map | PG_PRESENT | PG_SELF | PG_ACCESSED | PG_RW;
-	self [PAGE_LEVELS-1][PAGE_MAP_ENTRIES-2] = 0;
-	spinlock_irqsave_unlock(curr_task->page_lock);
-
-	/* Flush TLB entries of 'other' self-reference */
-	flush_tlb();
-
-	return ret;
-}
-
 void page_fault_handler(struct state *s)
 {
 	size_t viraddr = read_cr2();
@@ -309,7 +217,7 @@ void page_fault_handler(struct state *s)
 		return 1;
 	}
 
-	spinlock_irqsave_lock(task->page_lock);
+	spinlock_irqsave_lock(&page_lock);
 
 	if ((task->heap) && (viraddr >= task->heap->start) && (viraddr < task->heap->end)) {
 		/*
@@ -317,7 +225,7 @@ void page_fault_handler(struct state *s)
 		 */
 		if (check_pagetables(viraddr)) {
 			//tlb_flush_one_page(viraddr);
-			spinlock_irqsave_unlock(task->page_lock);
+			spinlock_irqsave_unlock(&page_lock);
 			return;
 		}
 
@@ -341,13 +249,13 @@ void page_fault_handler(struct state *s)
 
 		// TODO: reusing of old data is possible => security issue
 
-		spinlock_irqsave_unlock(task->page_lock);
+		spinlock_irqsave_unlock(&page_lock);
 
 		return;
 	}
 
 default_handler:
-	spinlock_irqsave_unlock(task->page_lock);
+	spinlock_irqsave_unlock(&page_lock);
 
 	kprintf("Page Fault Exception (%d) on core %d at cs:ip = %#x:%#lx, fs = %#lx, gs = %#lx, rflags 0x%lx, task = %u, addr = %#lx, error = %#x [ %s %s %s %s %s ]\n",
 		s->int_no, CORE_ID, s->cs, s->rip, s->fs, s->gs, s->rflags, task->id, viraddr, s->error,
diff --git a/hermit/include/hermit/memory.h b/hermit/include/hermit/memory.h
index 739e414e3..4c77bc879 100644
--- a/hermit/include/hermit/memory.h
+++ b/hermit/include/hermit/memory.h
@@ -57,14 +57,4 @@ int put_pages(size_t phyaddr, size_t npages);
  */
 static inline int put_page(size_t phyaddr) { return put_pages(phyaddr, 1); }
 
-/** @brief Copy a physical page frame
- *
- * @param psrc physical address of source page frame
- * @param pdest physical address of source page frame
- * @return
- * - 0 on success
- * - -1 on failure
- */
-int copy_page(size_t pdest, size_t psrc);
-
 #endif
diff --git a/hermit/include/hermit/stdlib.h b/hermit/include/hermit/stdlib.h
index 2cf83b55d..a29f08b5a 100644
--- a/hermit/include/hermit/stdlib.h
+++ b/hermit/include/hermit/stdlib.h
@@ -62,19 +62,6 @@ void NORETURN do_abort(void);
  */
 void* palloc(size_t sz, uint32_t flags);
 
-/** @brief Free general kernel pages
- *
- * This function removes the memory from the VMA subsystem,
- * unmap the pages and releases the physical pages.
- *
- * The pmalloc() doesn't track how much memory was allocated for which pointer,
- * so you have to specify how much memory shall be freed.
- *
- * @param addr The virtual address returned by palloc().
- * @param sz The size which should freed
- */
-void pfree(void* addr, size_t sz);
-
 /** @brief The memory allocator function
  *
  * This allocator uses a buddy system to allocate memory.
diff --git a/hermit/include/hermit/tasks_types.h b/hermit/include/hermit/tasks_types.h
index 59c563185..81e90e6f8 100644
--- a/hermit/include/hermit/tasks_types.h
+++ b/hermit/include/hermit/tasks_types.h
@@ -86,14 +86,6 @@ typedef struct task {
 	uint8_t prio;
 	/// timeout for a blocked task
 	uint64_t timeout;
-	/// Physical address of root page table
-	size_t page_map;
-	/// Lock for page tables
-	spinlock_irqsave_t* page_lock;
-	/// lock for the VMA_list
-	spinlock_t* vma_lock;
-	/// list of VMAs
-	vma_t* vma_list;
 	/// starting time/tick of the task
 	uint64_t start_tick;
 	/// the userspace heap
diff --git a/hermit/include/hermit/vma.h b/hermit/include/hermit/vma.h
index 981f8fee2..fb17c5975 100644
--- a/hermit/include/hermit/vma.h
+++ b/hermit/include/hermit/vma.h
@@ -59,12 +59,10 @@ extern "C" {
 #define VMA_HEAP (VMA_READ|VMA_WRITE|VMA_CACHEABLE)
 
 // boundaries for VAS allocation
-#define VMA_KERN_MIN	0xC0000
-#define VMA_KERN_MAX	KERNEL_SPACE
-#define VMA_USER_MIN	KERNEL_SPACE
+#define VMA_MIN		0xC0000
 
 // last three top level entries are reserved
-#define VMA_USER_MAX	0xFFFFFE8000000000
+#define VMA_MAX		0xFFFFFE8000000000
 
 struct vma;
 
@@ -130,21 +128,6 @@ size_t vma_alloc(size_t size, uint32_t flags);
  */
 int vma_free(size_t start, size_t end);
 
-/** @brief Free all virtual memory areas
- *
- * @return
- * - 0 on success
- */
-int drop_vma_list(struct task* task);
-
-/** @brief Copy the VMA list of the current task to task
- *
- * @param task The task where the list should be copied to
- * @return
- * - 0 on success
- */
-int copy_vma_list(struct task* src, struct task* dest);
-
 /** @brief Dump information about this task's VMAs into the terminal. */
 void vma_dump(void);
 
diff --git a/hermit/kernel/syscall.c b/hermit/kernel/syscall.c
index 47c5980d5..40282cf9d 100644
--- a/hermit/kernel/syscall.c
+++ b/hermit/kernel/syscall.c
@@ -215,13 +215,14 @@ ssize_t writev(int fildes, const struct iovec *iov, int iovcnt)
 
 ssize_t sys_sbrk(ssize_t incr)
 {
-	task_t* task = per_core(current_task);
-	vma_t* heap = task->heap;
 	ssize_t ret;
+	vma_t* heap = per_core(current_task)->heap;
+	static spinlock_t heap_lock = SPINLOCK_INIT;
 
-	spinlock_lock(task->vma_lock);
+	spinlock_lock(&heap_lock);
 
 	if (BUILTIN_EXPECT(!heap, 0)) {
+		spinlock_unlock(&heap_lock);
 		kprintf("sys_sbrk: missing heap!\n");
 		do_abort();
 	}
@@ -234,7 +235,7 @@ ssize_t sys_sbrk(ssize_t incr)
 
 	// allocation and mapping of new pages for the heap
 	// is catched by the pagefault handler
-	spinlock_unlock(task->vma_lock);
+	spinlock_unlock(&heap_lock);
 
 	return ret;
 }
diff --git a/hermit/kernel/tasks.c b/hermit/kernel/tasks.c
index 1450a55ed..68125f977 100644
--- a/hermit/kernel/tasks.c
+++ b/hermit/kernel/tasks.c
@@ -46,20 +46,13 @@ extern const void tls_end;
 
 #define TLS_OFFSET	8
 
-/*
- * HermitCore is a single address space OS
- * => we need only a lock to protect the page tables & VMA
- */
-static spinlock_irqsave_t page_lock = SPINLOCK_IRQSAVE_INIT;
-static spinlock_t vma_lock = SPINLOCK_INIT;
-
 /** @brief Array of task structures (aka PCB)
  *
  * A task's id will be its position in this array.
  */
 static task_t task_table[MAX_TASKS] = { \
-	[0] = {0, TASK_IDLE, 0, NULL, NULL, TASK_DEFAULT_FLAGS, 0, 0, 0, &page_lock, &vma_lock, NULL, 0, NULL, NULL, 0, NULL, NULL, 0, 0, 0}, \
-	[1 ... MAX_TASKS-1] = {0, TASK_INVALID, 0, NULL, NULL, TASK_DEFAULT_FLAGS, 0, 0, 0, &page_lock, &vma_lock, NULL, 0, NULL, NULL, 0, NULL, NULL, 0, 0, 0}};
+	[0] = {0, TASK_IDLE, 0, NULL, NULL, TASK_DEFAULT_FLAGS, 0, 0, 0, NULL, NULL, 0, NULL, NULL, 0, 0, 0}, \
+	[1 ... MAX_TASKS-1] = {0, TASK_INVALID, 0, NULL, NULL, TASK_DEFAULT_FLAGS, 0, 0, 0, NULL, NULL, 0, NULL, NULL, 0, 0, 0}};
 
 static spinlock_irqsave_t table_lock = SPINLOCK_IRQSAVE_INIT;
 
@@ -115,7 +108,6 @@ int multitasking_init(void)
 	task_table[0].stack = (char*) ((size_t)&boot_stack + core_id * KERNEL_STACK_SIZE);
 	set_per_core(kernel_stack, task_table[0].stack + KERNEL_STACK_SIZE - 0x10);
 	set_per_core(current_task, task_table+0);
-	task_table[0].page_map = read_cr3();
 
 	readyqueues[core_id].idle = task_table+0;
 
@@ -168,12 +160,8 @@ int set_idle_task(void)
 		task_table[i].stack = (char*) ((size_t)&boot_stack + core_id * KERNEL_STACK_SIZE);
 		set_per_core(kernel_stack, task_table[i].stack + KERNEL_STACK_SIZE - 0x10);
 		task_table[i].prio = IDLE_PRIO;
-		task_table[i].vma_lock = &vma_lock;
-		task_table[i].vma_list = NULL;
 		task_table[i].heap = NULL;
-		task_table[i].page_lock = &page_lock;
 		task_table[i].user_usage = NULL;
-		task_table[i].page_map = read_cr3();
 		readyqueues[core_id].idle = task_table+i;
 		set_per_core(current_task, readyqueues[core_id].idle);
 		ret = 0;
@@ -369,7 +357,6 @@ int clone_task(tid_t* id, entry_point_t ep, void* arg, uint8_t prio)
 	task_table[i].last_stack_pointer = NULL;
 	task_table[i].stack = stack;
 	task_table[i].prio = prio;
-	task_table[i].vma_list = curr_task->vma_list;
 	task_table[i].heap = curr_task->heap;
 	task_table[i].start_tick = get_clock_tick();
 	task_table[i].parent = curr_task->id;
@@ -377,7 +364,6 @@ int clone_task(tid_t* id, entry_point_t ep, void* arg, uint8_t prio)
 	task_table[i].tls_size = curr_task->tls_size;
 	task_table[i].lwip_err = 0;
 	task_table[i].user_usage = curr_task->user_usage;
-	task_table[i].page_map = curr_task->page_map;
 
 	if (id)
 		*id = i;
@@ -428,7 +414,6 @@ int create_task(tid_t* id, entry_point_t ep, void* arg, uint8_t prio, uint32_t core_id)
 	uint32_t i;
 	void* stack = NULL;
 	void* counter = NULL;
-	task_t* curr_task;
 
 	if (BUILTIN_EXPECT(!ep, 0))
 		return -EINVAL;
@@ -441,8 +426,6 @@ int create_task(tid_t* id, entry_point_t ep, void* arg, uint8_t prio, uint32_t core_id)
 	if (BUILTIN_EXPECT(!readyqueues[core_id].idle, 0))
 		return -EINVAL;
 
-	curr_task = per_core(current_task);
-
 	stack = kmalloc(DEFAULT_STACK_SIZE);
 	if (BUILTIN_EXPECT(!stack, 0))
 		return -ENOMEM;
@@ -464,17 +447,12 @@ int create_task(tid_t* id, entry_point_t ep, void* arg, uint8_t prio, uint32_t core_id)
 	task_table[i].last_stack_pointer = NULL;
 	task_table[i].stack = stack;
 	task_table[i].prio = prio;
-	task_table[i].vma_lock = &vma_lock;
-	task_table[i].vma_list = NULL;
 	task_table[i].heap = NULL;
 	task_table[i].start_tick = get_clock_tick();
 	task_table[i].parent = 0;
 	task_table[i].tls_addr = 0;
 	task_table[i].tls_size = 0;
 	task_table[i].lwip_err = 0;
-
-	task_table[i].page_lock = &page_lock;
-	task_table[i].page_map = curr_task->page_map;
 	task_table[i].user_usage = (atomic_int64_t*) counter;
 
 	if (id)
diff --git a/hermit/mm/malloc.c b/hermit/mm/malloc.c
index 5382fb4e4..1c846d404 100644
--- a/hermit/mm/malloc.c
+++ b/hermit/mm/malloc.c
@@ -161,26 +161,6 @@ void* palloc(size_t sz, uint32_t flags)
 	return (void*) viraddr;
 }
 
-void pfree(void* addr, size_t sz)
-{
-	if (BUILTIN_EXPECT(!addr || !sz, 0))
-		return;
-
-	size_t i;
-	size_t phyaddr;
-	size_t viraddr = (size_t) addr & PAGE_MASK;
-	uint32_t npages = PAGE_FLOOR(sz) >> PAGE_BITS;
-
-	// memory is probably not continuously mapped! (userspace heap)
-	for (i=0; i<npages; i++) {
-		phyaddr = virt_to_phys(viraddr+i*PAGE_SIZE);
-		put_page(phyaddr);
-	}
-
-	page_unmap(viraddr, npages);
-	vma_free(viraddr, viraddr+npages*PAGE_SIZE);
-}
-
 void* kmalloc(size_t sz)
 {
 	if (BUILTIN_EXPECT(!sz, 0))
diff --git a/hermit/mm/vma.c b/hermit/mm/vma.c
--- a/hermit/mm/vma.c
+++ b/hermit/mm/vma.c
@@ ... @@ size_t vma_alloc(size_t size, uint32_t flags)
-	if (flags & VMA_USER) {
-		base = VMA_USER_MIN;
-		limit = VMA_USER_MAX;
-		list = &task->vma_list;
-		lock = task->vma_lock;
-	}
-	else {
-		base = VMA_KERN_MIN;
-		limit = VMA_KERN_MAX;
-		list = &vma_list;
-		lock = &vma_lock;
-	}
+	// boundaries for search
+	size_t base = VMA_MIN;
+	size_t limit = VMA_MAX;
 
 	spinlock_lock(lock);
 
@@ -148,28 +138,15 @@ int vma_free(size_t start, size_t end)
 {
-	task_t* task = per_core(current_task);
-	spinlock_t* lock;
+	spinlock_t* lock = &vma_lock;
 	vma_t* vma;
-	vma_t** list = NULL;
+	vma_t** list = &vma_list;
 
 	//kprintf("vma_free: start = %#lx, end = %#lx\n", start, end);
 
 	if (BUILTIN_EXPECT(start >= end, 0))
 		return -EINVAL;
 
-	if (end < VMA_KERN_MAX) {
-		lock = &vma_lock;
-		list = &vma_list;
-	}
-	else if (start >= VMA_KERN_MAX) {
-		lock = task->vma_lock;
-		list = &task->vma_list;
-	}
-
-	if (BUILTIN_EXPECT(!list || !*list, 0))
-		return -EINVAL;
-
 	spinlock_lock(lock);
 
 	// search vma
@@ -221,30 +198,12 @@ int vma_free(size_t start, size_t end)
 
 int vma_add(size_t start, size_t end, uint32_t flags)
 {
-	task_t* task = per_core(current_task);
-	spinlock_t* lock;
-	vma_t** list;
+	spinlock_t* lock = &vma_lock;
+	vma_t** list = &vma_list;
 
 	if (BUILTIN_EXPECT(start >= end, 0))
 		return -EINVAL;
 
-	if (flags & VMA_USER) {
-		list = &task->vma_list;
-		lock = task->vma_lock;
-
-		// check if address is in userspace
-		if (BUILTIN_EXPECT(start < VMA_KERN_MAX, 0))
-			return -EINVAL;
-	}
-	else {
-		list = &vma_list;
-		lock = &vma_lock;
-
-		// check if address is in kernelspace
-		if (BUILTIN_EXPECT(end >= VMA_KERN_MAX, 0))
-			return -EINVAL;
-	}
-
 	//kprintf("vma_add: start = %#lx, end = %#lx, flags = %#x\n", start, end, flags);
 
 	spinlock_lock(lock);
@@ -292,58 +251,6 @@ int vma_add(size_t start, size_t end, uint32_t flags)
 
 	return 0;
 }
 
-int copy_vma_list(task_t* src, task_t* dest)
-{
-	spinlock_init(dest->vma_lock);
-
-	spinlock_lock(src->vma_lock);
-	spinlock_lock(dest->vma_lock);
-
-	vma_t* last = NULL;
-	vma_t* old;
-	for (old=src->vma_list; old; old=old->next) {
-		vma_t *new = kmalloc(sizeof(vma_t));
-		if (BUILTIN_EXPECT(!new, 0)) {
-			spinlock_unlock(dest->vma_lock);
-			spinlock_unlock(src->vma_lock);
-			return -ENOMEM;
-		}
-
-		new->start = old->start;
-		new->end = old->end;
-		new->flags = old->flags;
-		new->prev = last;
-
-		if (last)
-			last->next = new;
-		else
-			dest->vma_list = new;
-
-		last = new;
-	}
-
-	spinlock_unlock(dest->vma_lock);
-	spinlock_unlock(src->vma_lock);
-
-	return 0;
-}
-
-int drop_vma_list(task_t *task)
-{
-	vma_t* vma;
-
-	spinlock_lock(task->vma_lock);
-
-	while ((vma = task->vma_list)) {
-		task->vma_list = vma->next;
-		kfree(vma);
-	}
-
-	spinlock_unlock(task->vma_lock);
-
-	return 0;
-}
-
 void vma_dump(void)
 {
 	void print_vma(vma_t *vma) {
@@ -356,15 +263,8 @@ void vma_dump(void)
 		}
 	}
 
-	task_t* task = per_core(current_task);
-
-	kputs("Kernelspace VMAs:\n");
+	kputs("VMAs:\n");
 	spinlock_lock(&vma_lock);
-	print_vma(vma_list);
+	print_vma(&vma_boot);
 	spinlock_unlock(&vma_lock);
-
-	kputs("Userspace VMAs:\n");
-	spinlock_lock(task->vma_lock);
-	print_vma(task->vma_list);
-	spinlock_unlock(task->vma_lock);
 }