Mirror of https://github.com/hermitcore/libhermit.git (synced 2025-03-09 00:00:03 +01:00)
simplify memory management
HermitCore is a single-address-space operating system => only one global lock is required to protect the page tables and the VMA structure. Furthermore, the obsolete code for duplicating tasks is removed.
This commit is contained in:
parent f3f1af3526
commit 545073235a
13 changed files with 45 additions and 428 deletions
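For orientation before the diff: the simplification described in the commit message boils down to replacing the per-task page_lock/vma_lock pointers and the kernel-space kslock with one global spinlock, since all tasks share the same address space. The following minimal sketch is not taken from the repository; the spinlock_irqsave_* names mirror the API visible in the diff, while page_map_sketch() and update_page_table_entry() are hypothetical placeholders for the real table walk in page_map()/page_unmap().

#include <stddef.h>

/* Toy spinlock with the same shape as HermitCore's spinlock_irqsave_t
 * (assumption: the real type also wraps an atomic flag). */
typedef struct { volatile int locked; } spinlock_irqsave_t;
#define SPINLOCK_IRQSAVE_INIT { 0 }

static void spinlock_irqsave_lock(spinlock_irqsave_t *l)
{
	while (__sync_lock_test_and_set(&l->locked, 1))
		;	/* spin until the lock is free */
}

static void spinlock_irqsave_unlock(spinlock_irqsave_t *l)
{
	__sync_lock_release(&l->locked);
}

/* Single-address-space OS => one global lock guards every page-table update. */
static spinlock_irqsave_t page_lock = SPINLOCK_IRQSAVE_INIT;

/* Hypothetical stand-in for the real page-table walk. */
static void update_page_table_entry(size_t viraddr, size_t phyaddr)
{
	(void) viraddr;
	(void) phyaddr;
}

int page_map_sketch(size_t viraddr, size_t phyaddr, size_t npages)
{
	size_t i;

	/* Before this commit the code picked between a per-task page_lock and the
	 * kernel-space kslock; afterwards every mapping takes the same global lock. */
	spinlock_irqsave_lock(&page_lock);

	for (i = 0; i < npages; i++)
		update_page_table_entry(viraddr + i * 4096, phyaddr + i * 4096);

	spinlock_irqsave_unlock(&page_lock);

	return 0;
}

The same reasoning is why page_map_copy(), copy_page(), copy_vma_list() and drop_vma_list() disappear in the hunks below: a cloned task now simply reuses curr_task->page_map and the shared VMA list instead of duplicating them.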
@@ -187,15 +187,4 @@ int page_unmap(size_t viraddr, size_t npages);
 */
int page_set_flags(size_t viraddr, uint32_t npages, int flags);

/** @brief Copy a whole page map tree
 *
 * @param dest Physical address of new page map
 * @retval 0 Success. Everything went fine.
 * @retval <0 Error. Something went wrong.
 */
int page_map_copy(struct task *dest);

/** @brief Free a whole page map tree */
int page_map_drop(void);

#endif

@@ -381,7 +381,7 @@ static int lapic_reset(void)
 */
int apic_calibration(void)
{
uint32_t i;
//uint32_t i;
uint32_t flags;
uint64_t ticks, old;

@@ -562,11 +562,13 @@ found_mp:
apic_io_entry_t* io_entry = (apic_io_entry_t*) addr;
ioapic = (ioapic_t*) ((size_t) io_entry->addr);
kprintf("Found IOAPIC at 0x%x\n", ioapic);
#if 0
page_map(IOAPIC_ADDR, (size_t)ioapic & PAGE_MASK, 1, PG_GLOBAL | PG_RW | PG_PCD);
vma_add(IOAPIC_ADDR, IOAPIC_ADDR + PAGE_SIZE, VMA_READ|VMA_WRITE);
ioapic = (ioapic_t*) IOAPIC_ADDR;
addr += 8;
kprintf("Map IOAPIC to 0x%x\n", ioapic);
#endif
addr += 8;
} else if (*((uint8_t*) addr) == 3) { // IO_INT
apic_ioirq_entry_t* extint = (apic_ioirq_entry_t*) addr;
if (extint->src_bus == isa_bus) {

@@ -881,6 +883,7 @@ int apic_init(void)
return 0;
}

#if 0
int ioapic_inton(uint8_t irq, uint8_t apicid)
{
ioapic_route_t route;

@@ -954,3 +957,4 @@ int ioapic_intoff(uint8_t irq, uint8_t apicid)

return 0;
}
#endif

@@ -46,63 +46,14 @@ extern const void percore_end0;

extern uint64_t base;

static inline void enter_user_task(size_t ep, size_t stack)
{
// don't interrupt the jump to user-level code
irq_disable();

asm volatile ("swapgs" ::: "memory");

// the jump also enable interrupts
jump_to_user_code(ep, stack);
}

static int thread_entry(void* arg, size_t ep)
{
#if 0
size_t addr, stack = 0;
size_t flags;
int64_t npages;
size_t offset = DEFAULT_STACK_SIZE-16;

//create user-level stack
npages = DEFAULT_STACK_SIZE >> PAGE_BITS;
if (DEFAULT_STACK_SIZE & (PAGE_SIZE-1))
npages++;

addr = get_pages(npages);
if (BUILTIN_EXPECT(!addr, 0)) {
kprintf("load_task: not enough memory!\n");
return -ENOMEM;
}

stack = (1ULL << 34ULL) - curr_task->id*DEFAULT_STACK_SIZE-PAGE_SIZE; // virtual address of the stack
flags = PG_USER|PG_RW;
if (has_nx())
flags |= PG_XD;

if (page_map(stack, addr, npages, flags)) {
put_pages(addr, npages);
kprintf("Could not map stack at 0x%x\n", stack);
return -ENOMEM;
}
memset((void*) stack, 0x00, npages*PAGE_SIZE);
//kprintf("stack located at 0x%zx (0x%zx)\n", stack, addr);

// create vma regions for the user-level stack
flags = VMA_CACHEABLE|VMA_USER|VMA_READ|VMA_WRITE;
vma_add(stack, stack+npages*PAGE_SIZE-1, flags);
#endif

if (init_tls())
return -ENOMEM;

//vma_dump();

// set first argument
//asm volatile ("mov %0, %%rdi" :: "r"(arg));
//enter_user_task(ep, stack+offset);

entry_point_t call_ep = (entry_point_t) ep;
call_ep(arg);

@@ -122,12 +73,6 @@ size_t* get_current_stack(void)
set_per_core(kernel_stack, stptr);
tss_set_rsp0(stptr);

#if 0
// do we change the address space?
if (read_cr3() != curr_task->page_map)
write_cr3(curr_task->page_map); // use new page table
#endif

return curr_task->last_stack_pointer;
}

@@ -176,12 +121,9 @@ int create_default_frame(task_t* task, entry_point_t ep, void* arg, uint32_t cor

/* The instruction pointer shall be set on the first function to be called
after IRETing */
//if ((size_t) ep < KERNEL_SPACE) {
// stptr->rip = (size_t)ep;
//} else {
stptr->rip = (size_t)thread_entry;
stptr->rsi = (size_t)ep; // use second argument to transfer the entry point
//}
stptr->rip = (size_t)thread_entry;
stptr->rsi = (size_t)ep; // use second argument to transfer the entry point

stptr->cs = 0x08;
stptr->ss = 0x10;
stptr->gs = core_id * ((size_t) &percore_end0 - (size_t) &percore_start);

@@ -52,8 +52,8 @@ extern const void kernel_start;
/// This page is reserved for copying
#define PAGE_TMP (PAGE_FLOOR((size_t) &kernel_start) - PAGE_SIZE)

/** Lock for kernel space page tables */
static spinlock_t kslock = SPINLOCK_INIT;
/** Single-address space operating system => one lock for all tasks */
static spinlock_irqsave_t page_lock = SPINLOCK_IRQSAVE_INIT;

/** This PGD table is initialized in entry.asm */
extern size_t* boot_map;

@@ -119,11 +119,7 @@ int page_map(size_t viraddr, size_t phyaddr, size_t npages, size_t bits)

curr_task = per_core(current_task);

/** @todo: might not be sufficient! */
if (bits & PG_USER)
spinlock_irqsave_lock(curr_task->page_lock);
else
spinlock_lock(&kslock);
spinlock_irqsave_lock(&page_lock);

/* Start iterating through the entries
 * beginning at the root table (PGD or PML4) */

@@ -172,117 +168,29 @@ int page_map(size_t viraddr, size_t phyaddr, size_t npages, size_t bits)

ret = 0;
out:
if (bits & PG_USER)
spinlock_irqsave_unlock(curr_task->page_lock);
else
spinlock_unlock(&kslock);
spinlock_irqsave_unlock(&page_lock);

return ret;
}

/** Tables are freed by page_map_drop() */
int page_unmap(size_t viraddr, size_t npages)
{
task_t* curr_task = per_core(current_task);

/* We aquire both locks for kernel and task tables
 * as we dont know to which the region belongs. */
spinlock_irqsave_lock(curr_task->page_lock);
spinlock_lock(&kslock);
spinlock_irqsave_lock(&page_lock);

/* Start iterating through the entries.
 * Only the PGT entries are removed. Tables remain allocated. */
size_t vpn, start = viraddr>>PAGE_BITS;
for (vpn=start; vpn<start+npages; vpn++)
for (vpn=start; vpn<start+npages; vpn++) {
self[0][vpn] = 0;
tlb_flush_one_page(vpn << PAGE_BITS);
}

spinlock_irqsave_unlock(curr_task->page_lock);
spinlock_unlock(&kslock);
spinlock_irqsave_unlock(&page_lock);

/* This can't fail because we don't make checks here */
return 0;
}

int page_map_drop(void)
{
task_t* curr_task = per_core(current_task);

void traverse(int lvl, long vpn) {
long stop;
for (stop=vpn+PAGE_MAP_ENTRIES; vpn<stop; vpn++) {
if ((self[lvl][vpn] & PG_PRESENT) && (self[lvl][vpn] & PG_USER)) {
/* Post-order traversal */
if (lvl)
traverse(lvl-1, vpn<<PAGE_MAP_BITS);

put_pages(self[lvl][vpn] & PAGE_MASK, 1);
atomic_int64_dec(curr_task->user_usage);
}
}
}

spinlock_irqsave_lock(curr_task->page_lock);

traverse(PAGE_LEVELS-1, 0);

spinlock_irqsave_unlock(curr_task->page_lock);

/* This can't fail because we don't make checks here */
return 0;
}

int page_map_copy(task_t *dest)
{
task_t* curr_task = per_core(current_task);

int traverse(int lvl, long vpn) {
long stop;
for (stop=vpn+PAGE_MAP_ENTRIES; vpn<stop; vpn++) {
if (self[lvl][vpn] & PG_PRESENT) {
if (self[lvl][vpn] & PG_USER) {
size_t phyaddr = get_pages(1);
if (BUILTIN_EXPECT(!phyaddr, 0))
return -ENOMEM;

atomic_int64_inc(dest->user_usage);

other[lvl][vpn] = phyaddr | (self[lvl][vpn] & ~PAGE_MASK);
if (lvl) /* PML4, PDPT, PGD */
traverse(lvl-1, vpn<<PAGE_MAP_BITS); /* Pre-order traversal */
else { /* PGT */
page_map(PAGE_TMP, phyaddr, 1, PG_RW);
memcpy((void*) PAGE_TMP, (void*) (vpn<<PAGE_BITS), PAGE_SIZE);
}
}
else if (self[lvl][vpn] & PG_SELF)
other[lvl][vpn] = 0;
else
other[lvl][vpn] = self[lvl][vpn];
}
else
other[lvl][vpn] = 0;
}
return 0;
}

// set present bit
dest->page_map |= PG_PRESENT;

spinlock_irqsave_lock(curr_task->page_lock);
self[PAGE_LEVELS-1][PAGE_MAP_ENTRIES-2] = dest->page_map | PG_PRESENT | PG_SELF | PG_ACCESSED | PG_RW;

int ret = traverse(PAGE_LEVELS-1, 0);

other[PAGE_LEVELS-1][PAGE_MAP_ENTRIES-1] = dest->page_map | PG_PRESENT | PG_SELF | PG_ACCESSED | PG_RW;
self [PAGE_LEVELS-1][PAGE_MAP_ENTRIES-2] = 0;
spinlock_irqsave_unlock(curr_task->page_lock);

/* Flush TLB entries of 'other' self-reference */
flush_tlb();

return ret;
}

void page_fault_handler(struct state *s)
{
size_t viraddr = read_cr2();

@@ -309,7 +217,7 @@ void page_fault_handler(struct state *s)
return 1;
}

spinlock_irqsave_lock(task->page_lock);
spinlock_irqsave_lock(&page_lock);

if ((task->heap) && (viraddr >= task->heap->start) && (viraddr < task->heap->end)) {
/*

@@ -317,7 +225,7 @@ void page_fault_handler(struct state *s)
 */
if (check_pagetables(viraddr)) {
//tlb_flush_one_page(viraddr);
spinlock_irqsave_unlock(task->page_lock);
spinlock_irqsave_unlock(&page_lock);
return;
}

@@ -341,13 +249,13 @@ void page_fault_handler(struct state *s)

// TODO: reusing of old data is possible => security issue

spinlock_irqsave_unlock(task->page_lock);
spinlock_irqsave_unlock(&page_lock);

return;
}

default_handler:
spinlock_irqsave_unlock(task->page_lock);
spinlock_irqsave_unlock(&page_lock);

kprintf("Page Fault Exception (%d) on core %d at cs:ip = %#x:%#lx, fs = %#lx, gs = %#lx, rflags 0x%lx, task = %u, addr = %#lx, error = %#x [ %s %s %s %s %s ]\n",
s->int_no, CORE_ID, s->cs, s->rip, s->fs, s->gs, s->rflags, task->id, viraddr, s->error,

@@ -57,14 +57,4 @@ int put_pages(size_t phyaddr, size_t npages);
 */
static inline int put_page(size_t phyaddr) { return put_pages(phyaddr, 1); }

/** @brief Copy a physical page frame
 *
 * @param psrc physical address of source page frame
 * @param pdest physical address of source page frame
 * @return
 * - 0 on success
 * - -1 on failure
 */
int copy_page(size_t pdest, size_t psrc);

#endif

@@ -62,19 +62,6 @@ void NORETURN do_abort(void);
 */
void* palloc(size_t sz, uint32_t flags);

/** @brief Free general kernel pages
 *
 * This function removes the memory from the VMA subsystem,
 * unmap the pages and releases the physical pages.
 *
 * The pmalloc() doesn't track how much memory was allocated for which pointer,
 * so you have to specify how much memory shall be freed.
 *
 * @param addr The virtual address returned by palloc().
 * @param sz The size which should freed
 */
void pfree(void* addr, size_t sz);

/** @brief The memory allocator function
 *
 * This allocator uses a buddy system to allocate memory.

@@ -86,14 +86,6 @@ typedef struct task {
uint8_t prio;
/// timeout for a blocked task
uint64_t timeout;
/// Physical address of root page table
size_t page_map;
/// Lock for page tables
spinlock_irqsave_t* page_lock;
/// lock for the VMA_list
spinlock_t* vma_lock;
/// list of VMAs
vma_t* vma_list;
/// starting time/tick of the task
uint64_t start_tick;
/// the userspace heap

@@ -59,12 +59,10 @@ extern "C" {
#define VMA_HEAP (VMA_READ|VMA_WRITE|VMA_CACHEABLE)

// boundaries for VAS allocation
#define VMA_KERN_MIN 0xC0000
#define VMA_KERN_MAX KERNEL_SPACE
#define VMA_USER_MIN KERNEL_SPACE
#define VMA_MIN 0xC0000

// last three top level entries are reserved
#define VMA_USER_MAX 0xFFFFFE8000000000
#define VMA_MAX 0xFFFFFE8000000000

struct vma;

@@ -130,21 +128,6 @@ size_t vma_alloc(size_t size, uint32_t flags);
 */
int vma_free(size_t start, size_t end);

/** @brief Free all virtual memory areas
 *
 * @return
 * - 0 on success
 */
int drop_vma_list(struct task* task);

/** @brief Copy the VMA list of the current task to task
 *
 * @param task The task where the list should be copied to
 * @return
 * - 0 on success
 */
int copy_vma_list(struct task* src, struct task* dest);

/** @brief Dump information about this task's VMAs into the terminal. */
void vma_dump(void);

@@ -215,13 +215,14 @@ ssize_t writev(int fildes, const struct iovec *iov, int iovcnt)

ssize_t sys_sbrk(ssize_t incr)
{
task_t* task = per_core(current_task);
vma_t* heap = task->heap;
ssize_t ret;
vma_t* heap = per_core(current_task)->heap;
static spinlock_t heap_lock = SPINLOCK_INIT;

spinlock_lock(task->vma_lock);
spinlock_lock(&heap_lock);

if (BUILTIN_EXPECT(!heap, 0)) {
spinlock_unlock(&heap_lock);
kprintf("sys_sbrk: missing heap!\n");
do_abort();
}

@@ -234,7 +235,7 @@ ssize_t sys_sbrk(ssize_t incr)
// allocation and mapping of new pages for the heap
// is catched by the pagefault handler

spinlock_unlock(task->vma_lock);
spinlock_unlock(&heap_lock);

return ret;
}

@@ -46,20 +46,13 @@ extern const void tls_end;

#define TLS_OFFSET 8

/*
 * HermitCore is a single address space OS
 * => we need only a lock to protect the page tables & VMA
 */
static spinlock_irqsave_t page_lock = SPINLOCK_IRQSAVE_INIT;
static spinlock_t vma_lock = SPINLOCK_INIT;

/** @brief Array of task structures (aka PCB)
 *
 * A task's id will be its position in this array.
 */
static task_t task_table[MAX_TASKS] = { \
[0] = {0, TASK_IDLE, 0, NULL, NULL, TASK_DEFAULT_FLAGS, 0, 0, 0, &page_lock, &vma_lock, NULL, 0, NULL, NULL, 0, NULL, NULL, 0, 0, 0}, \
[1 ... MAX_TASKS-1] = {0, TASK_INVALID, 0, NULL, NULL, TASK_DEFAULT_FLAGS, 0, 0, 0, &page_lock, &vma_lock, NULL, 0, NULL, NULL, 0, NULL, NULL, 0, 0, 0}};
[0] = {0, TASK_IDLE, 0, NULL, NULL, TASK_DEFAULT_FLAGS, 0, 0, 0, NULL, NULL, 0, NULL, NULL, 0, 0, 0}, \
[1 ... MAX_TASKS-1] = {0, TASK_INVALID, 0, NULL, NULL, TASK_DEFAULT_FLAGS, 0, 0, 0, NULL, NULL, 0, NULL, NULL, 0, 0, 0}};

static spinlock_irqsave_t table_lock = SPINLOCK_IRQSAVE_INIT;

@@ -115,7 +108,6 @@ int multitasking_init(void)
task_table[0].stack = (char*) ((size_t)&boot_stack + core_id * KERNEL_STACK_SIZE);
set_per_core(kernel_stack, task_table[0].stack + KERNEL_STACK_SIZE - 0x10);
set_per_core(current_task, task_table+0);
task_table[0].page_map = read_cr3();

readyqueues[core_id].idle = task_table+0;

@@ -168,12 +160,8 @@ int set_idle_task(void)
task_table[i].stack = (char*) ((size_t)&boot_stack + core_id * KERNEL_STACK_SIZE);
set_per_core(kernel_stack, task_table[i].stack + KERNEL_STACK_SIZE - 0x10);
task_table[i].prio = IDLE_PRIO;
task_table[i].vma_lock = &vma_lock;
task_table[i].vma_list = NULL;
task_table[i].heap = NULL;
task_table[i].page_lock = &page_lock;
task_table[i].user_usage = NULL;
task_table[i].page_map = read_cr3();
readyqueues[core_id].idle = task_table+i;
set_per_core(current_task, readyqueues[core_id].idle);
ret = 0;

@@ -369,7 +357,6 @@ int clone_task(tid_t* id, entry_point_t ep, void* arg, uint8_t prio)
task_table[i].last_stack_pointer = NULL;
task_table[i].stack = stack;
task_table[i].prio = prio;
task_table[i].vma_list = curr_task->vma_list;
task_table[i].heap = curr_task->heap;
task_table[i].start_tick = get_clock_tick();
task_table[i].parent = curr_task->id;

@@ -377,7 +364,6 @@ int clone_task(tid_t* id, entry_point_t ep, void* arg, uint8_t prio)
task_table[i].tls_size = curr_task->tls_size;
task_table[i].lwip_err = 0;
task_table[i].user_usage = curr_task->user_usage;
task_table[i].page_map = curr_task->page_map;

if (id)
*id = i;

@@ -428,7 +414,6 @@ int create_task(tid_t* id, entry_point_t ep, void* arg, uint8_t prio, uint32_t c
uint32_t i;
void* stack = NULL;
void* counter = NULL;
task_t* curr_task;

if (BUILTIN_EXPECT(!ep, 0))
return -EINVAL;

@@ -441,8 +426,6 @@ int create_task(tid_t* id, entry_point_t ep, void* arg, uint8_t prio, uint32_t c
if (BUILTIN_EXPECT(!readyqueues[core_id].idle, 0))
return -EINVAL;

curr_task = per_core(current_task);

stack = kmalloc(DEFAULT_STACK_SIZE);
if (BUILTIN_EXPECT(!stack, 0))
return -ENOMEM;

@@ -464,17 +447,12 @@ int create_task(tid_t* id, entry_point_t ep, void* arg, uint8_t prio, uint32_t c
task_table[i].last_stack_pointer = NULL;
task_table[i].stack = stack;
task_table[i].prio = prio;
task_table[i].vma_lock = &vma_lock;
task_table[i].vma_list = NULL;
task_table[i].heap = NULL;
task_table[i].start_tick = get_clock_tick();
task_table[i].parent = 0;
task_table[i].tls_addr = 0;
task_table[i].tls_size = 0;
task_table[i].lwip_err = 0;

task_table[i].page_lock = &page_lock;
task_table[i].page_map = curr_task->page_map;
task_table[i].user_usage = (atomic_int64_t*) counter;

if (id)

@@ -161,26 +161,6 @@ void* palloc(size_t sz, uint32_t flags)
return (void*) viraddr;
}

void pfree(void* addr, size_t sz)
{
if (BUILTIN_EXPECT(!addr || !sz, 0))
return;

size_t i;
size_t phyaddr;
size_t viraddr = (size_t) addr & PAGE_MASK;
uint32_t npages = PAGE_FLOOR(sz) >> PAGE_BITS;

// memory is probably not continuously mapped! (userspace heap)
for (i=0; i<npages; i++) {
phyaddr = virt_to_phys(viraddr+i*PAGE_SIZE);
put_page(phyaddr);
}

page_unmap(viraddr, npages);
vma_free(viraddr, viraddr+npages*PAGE_SIZE);
}

void* kmalloc(size_t sz)
{
if (BUILTIN_EXPECT(!sz, 0))

@@ -152,43 +152,6 @@ out_err:
return -ENOMEM;
}

int copy_page(size_t pdest, size_t psrc)
{
int err;

static size_t viraddr;
if (!viraddr) { // statically allocate virtual memory area
viraddr = vma_alloc(2 * PAGE_SIZE, VMA_HEAP);
if (BUILTIN_EXPECT(!viraddr, 0))
return -ENOMEM;
}

// map pages
size_t vsrc = viraddr;
err = page_map(vsrc, psrc, 1, PG_GLOBAL|PG_RW);
if (BUILTIN_EXPECT(err, 0)) {
page_unmap(viraddr, 1);
return -ENOMEM;
}

size_t vdest = viraddr + PAGE_SIZE;
err = page_map(vdest, pdest, 1, PG_GLOBAL|PG_RW);
if (BUILTIN_EXPECT(err, 0)) {
page_unmap(viraddr + PAGE_SIZE, 1);
return -ENOMEM;
}

kprintf("copy_page: copy page frame from: %#lx (%#lx) to %#lx (%#lx)\n", vsrc, psrc, vdest, pdest); // TODO remove

// copy the whole page
memcpy((void*) vdest, (void*) vsrc, PAGE_SIZE);

// householding
page_unmap(viraddr, 2);

return 0;
}

int memory_init(void)
{
size_t addr, image_size = (size_t) &kernel_end - (size_t) &kernel_start;

hermit/mm/vma.c (128 changes)
@@ -48,7 +48,7 @@ extern const void kernel_end;
 * For bootstrapping we initialize the VMA list with one empty VMA
 * (start == end) and expand this VMA by calls to vma_alloc()
 */
static vma_t vma_boot = { VMA_KERN_MIN, VMA_KERN_MIN, VMA_HEAP };
static vma_t vma_boot = { VMA_MIN, VMA_MIN, VMA_HEAP };
static vma_t* vma_list = &vma_boot;
static spinlock_t vma_lock = SPINLOCK_INIT;

@@ -75,27 +75,17 @@ out:

size_t vma_alloc(size_t size, uint32_t flags)
{
task_t* task = per_core(current_task);
spinlock_t* lock;
vma_t** list;
spinlock_t* lock = &vma_lock;
vma_t** list = &vma_list;

//kprintf("vma_alloc: size = %#lx, flags = %#x\n", size, flags);

size_t base, limit; // boundaries for search
size_t start, end; // boundaries of free gaps
// boundaries of free gaps
size_t start, end;

if (flags & VMA_USER) {
base = VMA_USER_MIN;
limit = VMA_USER_MAX;
list = &task->vma_list;
lock = task->vma_lock;
}
else {
base = VMA_KERN_MIN;
limit = VMA_KERN_MAX;
list = &vma_list;
lock = &vma_lock;
}
// boundaries for search
size_t base = VMA_MIN;
size_t limit = VMA_MAX;

spinlock_lock(lock);

@@ -148,28 +138,15 @@ found:

int vma_free(size_t start, size_t end)
{
task_t* task = per_core(current_task);
spinlock_t* lock;
spinlock_t* lock = &vma_lock;
vma_t* vma;
vma_t** list = NULL;
vma_t** list = &vma_list;

//kprintf("vma_free: start = %#lx, end = %#lx\n", start, end);

if (BUILTIN_EXPECT(start >= end, 0))
return -EINVAL;

if (end < VMA_KERN_MAX) {
lock = &vma_lock;
list = &vma_list;
}
else if (start >= VMA_KERN_MAX) {
lock = task->vma_lock;
list = &task->vma_list;
}

if (BUILTIN_EXPECT(!list || !*list, 0))
return -EINVAL;

spinlock_lock(lock);

// search vma

@@ -221,30 +198,12 @@ int vma_free(size_t start, size_t end)

int vma_add(size_t start, size_t end, uint32_t flags)
{
task_t* task = per_core(current_task);
spinlock_t* lock;
vma_t** list;
spinlock_t* lock = &vma_lock;
vma_t** list = &vma_list;

if (BUILTIN_EXPECT(start >= end, 0))
return -EINVAL;

if (flags & VMA_USER) {
list = &task->vma_list;
lock = task->vma_lock;

// check if address is in userspace
if (BUILTIN_EXPECT(start < VMA_KERN_MAX, 0))
return -EINVAL;
}
else {
list = &vma_list;
lock = &vma_lock;

// check if address is in kernelspace
if (BUILTIN_EXPECT(end >= VMA_KERN_MAX, 0))
return -EINVAL;
}

//kprintf("vma_add: start = %#lx, end = %#lx, flags = %#x\n", start, end, flags);

spinlock_lock(lock);

@@ -292,58 +251,6 @@ int vma_add(size_t start, size_t end, uint32_t flags)
return 0;
}

int copy_vma_list(task_t* src, task_t* dest)
{
spinlock_init(dest->vma_lock);

spinlock_lock(src->vma_lock);
spinlock_lock(dest->vma_lock);

vma_t* last = NULL;
vma_t* old;
for (old=src->vma_list; old; old=old->next) {
vma_t *new = kmalloc(sizeof(vma_t));
if (BUILTIN_EXPECT(!new, 0)) {
spinlock_unlock(dest->vma_lock);
spinlock_unlock(src->vma_lock);
return -ENOMEM;
}

new->start = old->start;
new->end = old->end;
new->flags = old->flags;
new->prev = last;

if (last)
last->next = new;
else
dest->vma_list = new;

last = new;
}

spinlock_unlock(dest->vma_lock);
spinlock_unlock(src->vma_lock);

return 0;
}

int drop_vma_list(task_t *task)
{
vma_t* vma;

spinlock_lock(task->vma_lock);

while ((vma = task->vma_list)) {
task->vma_list = vma->next;
kfree(vma);
}

spinlock_unlock(task->vma_lock);

return 0;
}

void vma_dump(void)
{
void print_vma(vma_t *vma) {

@@ -356,15 +263,8 @@ void vma_dump(void)
}
}

task_t* task = per_core(current_task);

kputs("Kernelspace VMAs:\n");
kputs("VMAs:\n");
spinlock_lock(&vma_lock);
print_vma(vma_list);
print_vma(&vma_boot);
spinlock_unlock(&vma_lock);

kputs("Userspace VMAs:\n");
spinlock_lock(task->vma_lock);
print_vma(task->vma_list);
spinlock_unlock(task->vma_lock);
}