code cleanup of task creation and some additions to the previous commit

parent 6699886358
commit d7644300a8

5 changed files with 177 additions and 159 deletions
@@ -373,7 +373,7 @@ static inline void tlb_flush_one_page(size_t addr)
* => User-level applications run only on one
* and we didn't flush the TLB of the other cores
*/
if (addr <= KERNEL_SPACE)
if (addr < KERNEL_SPACE)
ipi_tlb_flush();
#endif
}
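The comparison changed here decides whether the flush is broadcast: kernel mappings below KERNEL_SPACE are shared by every core, so their stale TLB entries have to be shot down everywhere, while a user-space address only ever sits in the TLB of the single core the application runs on (as the comment above says). A minimal sketch of that logic follows; the KERNEL_SPACE value and the invlpg wrapper are assumptions, only the names tlb_flush_one_page and ipi_tlb_flush come from the hunk.

#include <stddef.h>

#define KERNEL_SPACE (1UL << 30)        /* assumed size of the shared, low kernel mapping */

void ipi_tlb_flush(void);               /* broadcasts the shootdown IPI (kernel routine) */

static inline void local_invlpg(size_t addr)
{
	asm volatile("invlpg (%0)" :: "r"(addr) : "memory");
}

static inline void tlb_flush_one_page(size_t addr)
{
	local_invlpg(addr);             /* always drop the local TLB entry */

	/* Only kernel addresses are mapped on every core; a user-space page
	 * lives in at most one TLB, so addr == KERNEL_SPACE (the first user
	 * address under this assumed layout) no longer triggers the
	 * expensive cross-core broadcast. */
	if (addr < KERNEL_SPACE)
		ipi_tlb_flush();
}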
@@ -618,25 +618,10 @@ static void pagefault_handler(struct state *s)
}
#endif

/*
* handle missing paging structures for userspace
* all kernel space paging structures have been initialized in entry64.asm
*/
if (viraddr >= PAGE_MAP_PGT) {
kprintf("map_region: missing paging structure at: 0x%lx (%s)\n", viraddr, page_map_names[entry_to_level(viraddr)]);

size_t phyaddr = get_page();
if (BUILTIN_EXPECT(!phyaddr, 0))
goto oom;

// TODO: initialize with zeros
// TODO: check that we are in userspace

// get pointer to parent page level entry
size_t *entry = (size_t *) ((int64_t) viraddr >> 9 & ~0x07);

// update entry
*entry = phyaddr|USER_TABLE;

return;
}
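The pointer arithmetic in this block relies on the self-mapped (recursive) page tables set up in entry64.asm: once the window starting at PAGE_MAP_PGT maps the paging structures themselves, the entry belonging to a faulting address inside that window is found by an arithmetic shift right by 9 with the low three bits cleared, and applying the same shift again climbs one paging level. A standalone sketch of that arithmetic; the PAGE_MAP_PGT value (self-reference in the last PML4 slot) is an assumption, only the shift expression is taken from the handler.

#include <stdint.h>
#include <stdio.h>

#define PAGE_MAP_PGT 0xFFFFFF8000000000ULL   /* base of the self-mapped page tables (assumed) */

/* Virtual address of the PTE that maps va, seen through the self-map:
 * drop the 12-bit page offset and multiply the page number by 8. */
static uint64_t pte_address(uint64_t va)
{
	return PAGE_MAP_PGT | (((va >> 12) << 3) & 0x0000007FFFFFFFF8ULL);
}

/* For an address that already lies inside the self-mapped window, one
 * sign-extending shift by 9 (low three bits cleared) yields the entry one
 * paging level up, which is exactly the expression used in the handler. */
static uint64_t parent_entry(uint64_t entry_addr)
{
	return (uint64_t)((int64_t)entry_addr >> 9) & ~0x07ULL;
}

int main(void)
{
	uint64_t va   = 0x00007f0000001000ULL;    /* arbitrary user address      */
	uint64_t pte  = pte_address(va);          /* where its PTE is mapped     */
	uint64_t pde  = parent_entry(pte);        /* entry in the page directory */
	uint64_t pdpe = parent_entry(pde);        /* one more level up           */

	printf("PTE %#llx  PDE %#llx  PDPE %#llx\n",
	       (unsigned long long)pte, (unsigned long long)pde,
	       (unsigned long long)pdpe);
	return 0;
}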
@@ -652,8 +637,6 @@ default_handler:
(s->error & 0x8) ? "reserved bit" : "\b",
s->rflags, s->rax, s->rbx, s->rcx, s->rdx, s->rdi, s->rsi, s->rbp, s->rsp);

oom:
kputs("map_region: out of memory\n");
irq_enable();
abort();
}
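The kprintf in the default handler decodes the hardware error code pushed on a page fault; bit 3 (0x8) is the reserved-bit violation printed above. For reference, the full bit layout can be decoded like this (standard x86 semantics, not code from this repository):

#include <stdint.h>
#include <stdio.h>

static void decode_pagefault_error(uint32_t error)
{
	printf("%s, %s access, %s mode%s%s\n",
	       (error & 0x1) ? "protection violation" : "page not present",
	       (error & 0x2) ? "write" : "read",
	       (error & 0x4) ? "user" : "supervisor",
	       (error & 0x8) ? ", reserved bit set" : "",
	       (error & 0x10) ? ", instruction fetch" : "");
}

int main(void)
{
	decode_pagefault_error(0x6);    /* a not-present write from user mode */
	return 0;
}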
@@ -35,21 +35,21 @@
extern "C" {
#endif

#define MAP_KERNEL_SPACE (1 << 0)
#define MAP_USER_SPACE (1 << 1)
#define MAP_PAGE_TABLE (1 << 2)
#define MAP_NO_CACHE (1 << 3)
#define MAP_WT (1 << 5)
#define MAP_CODE (1 << 6)
#define MAP_READONLY (1 << 7)
#ifdef CONFIG_ROCKCREEK
#define MAP_MPE (1 << 8)
#endif
#define MAP_SVM_STRONG (1 << 9)
#define MAP_SVM_LAZYRELEASE (1 << 10)
#define MAP_SVM_INIT (1 << 11)
#define MAP_NO_ACCESS (1 << 12)
#define MAP_REMAP (1 << 13)

#define MAP_NO_ACCESS (1 << 0)
#define MAP_READ_ONLY (1 << 1)
#define MAP_USER_SPACE (1 << 2)
#define MAP_CODE (1 << 3)
#define MAP_WT (1 << 4)
#define MAP_NO_CACHE (1 << 5)
#define MAP_MPE (1 << 6)
#define MAP_SVM_STRONG (1 << 7)
#define MAP_SVM_LAZYRELEASE (1 << 8)
#define MAP_SVM_INIT (1 << 9)

#define MAP_KERNEL_SPACE (0 << 2) // legacy compatibility
#define MAP_REMAP (1 << 12)
//#define MAP_NON_CONTINUOUS (1 << 13) // TODO

void NORETURN abort(void);
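The renumbering also changes the default: with MAP_KERNEL_SPACE now defined as (0 << 2), a mapping is a kernel mapping unless MAP_USER_SPACE is passed, and the old constant survives only as a legacy alias. How such flags typically end up as x86 paging bits is sketched below; page_bits() is a hypothetical helper and the PG_* constants are the architectural PTE bits, not definitions from this commit.

#include <stdint.h>

#define MAP_NO_ACCESS    (1 << 0)   /* values as renumbered in the hunk above */
#define MAP_READ_ONLY    (1 << 1)
#define MAP_USER_SPACE   (1 << 2)
#define MAP_WT           (1 << 4)
#define MAP_NO_CACHE     (1 << 5)
#define MAP_KERNEL_SPACE (0 << 2)   /* legacy alias: simply leaves the user bit clear */

#define PG_PRESENT (1 << 0)         /* standard x86 PTE bits */
#define PG_RW      (1 << 1)
#define PG_USER    (1 << 2)
#define PG_PWT     (1 << 3)
#define PG_PCD     (1 << 4)

static uint64_t page_bits(uint32_t flags)
{
	uint64_t bits = PG_PRESENT | PG_RW;       /* default: kernel, read/write */

	if (flags & MAP_NO_ACCESS)
		bits &= ~(uint64_t)PG_PRESENT;    /* keep the page non-present */
	if (flags & MAP_READ_ONLY)
		bits &= ~(uint64_t)PG_RW;
	if (flags & MAP_USER_SPACE)               /* MAP_KERNEL_SPACE == 0 leaves this clear */
		bits |= PG_USER;
	if (flags & MAP_WT)
		bits |= PG_PWT;
	if (flags & MAP_NO_CACHE)
		bits |= PG_PCD;

	return bits;
}

int main(void)
{
	/* a user, read-only mapping request yields a present, non-writable, user entry */
	return page_bits(MAP_USER_SPACE | MAP_READ_ONLY) == (PG_PRESENT | PG_USER) ? 0 : 1;
}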
@@ -86,9 +86,9 @@ typedef struct task {
struct task* prev;
/// last core id on which the task was running
uint32_t last_core;
/// usage in number of pages
/// usage in number of pages (including page map tables)
atomic_int32_t user_usage;
/// avoids concurrent access to the page map structures
/// locks access to all page maps with PG_USER flag set
spinlock_irqsave_t page_lock;
/// pointer to page directory (32bit) or page map level 4 (64bit) table respectively
page_map_t* page_map;
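The two reworded comments tie the fields together: user_usage now also counts the page-map tables a task allocates, and page_lock is what serialises every update of page maps carrying the PG_USER flag. A rough sketch of the intended usage pattern; the function name and set_entry() are placeholders, only the field names, get_page() and the lock/atomic calls mirror what appears in this commit.

static int map_user_page_sketch(task_t* task, size_t viraddr)
{
	spinlock_irqsave_lock(&task->page_lock);     /* guards all PG_USER page maps */

	size_t phyaddr = get_page();
	if (BUILTIN_EXPECT(!phyaddr, 0)) {
		spinlock_irqsave_unlock(&task->page_lock);
		return -ENOMEM;
	}

	set_entry(viraddr, phyaddr);                 /* placeholder for the real mapping code */
	atomic_int32_inc(&task->user_usage);         /* counts data pages and page tables alike */

	spinlock_irqsave_unlock(&task->page_lock);
	return 0;
}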
@@ -107,7 +107,7 @@ typedef struct task {
/// LwIP error code
int lwip_err;
/// mail inbox
mailbox_wait_msg_t inbox;
mailbox_wait_msg_t inbox;
/// mail outbox array
mailbox_wait_msg_t* outbox[MAX_TASKS];
/// FPU state

kernel/tasks.c (281 changed lines)
@@ -103,7 +103,12 @@ uint32_t get_highest_priority(void)
*/
static void* create_stack(void)
{
return palloc(KERNEL_STACK_SIZE, MAP_KERNEL_SPACE);
/*
* TODO: our stack should be non-executable!
* We need this atm because nested functions in page64.c
* are using trampolines on the stack.
*/
return palloc(KERNEL_STACK_SIZE, MAP_CODE);
}

/** @brief Delete stack of a finished task
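The new TODO explains why the kernel stack is now allocated with MAP_CODE: page64.c uses GCC's nested-function extension, and taking the address of a nested function that touches its enclosing frame makes the compiler build a small trampoline on the stack, which must be executable. A standalone illustration of the construct (not code from this repository):

/* GNU C extension: passing the address of a nested function that uses a
 * variable of the enclosing frame forces gcc to place an executable
 * trampoline on the stack; with a non-executable stack this would fault. */
#include <stdio.h>

static void for_each(int n, void (*fn)(int))
{
	for (int i = 0; i < n; i++)
		fn(i);
}

int main(void)
{
	int sum = 0;

	void add(int i)            /* nested function, closes over 'sum' */
	{
		sum += i;
	}

	for_each(5, add);          /* taking its address creates the trampoline */
	printf("sum = %d\n", sum);
	return 0;
}

Once those call sites are gone, the stack can be switched back to a non-executable mapping, which is what the TODO asks for.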
@@ -247,18 +252,26 @@ static void NORETURN do_exit(int arg) {
}
}
}
//finally the table has to be cleared.
kfree(curr_task->fildes_table);

kfree(curr_task->fildes_table); // finally the table has to be cleared
}

kprintf("Terminate task: %u, return value %d\n", curr_task->id, arg);

wakeup_blocked_tasks(arg);
flags = irq_nested_disable();

drop_vma_list(); // kfree virtual memory areas and the vma_list
drop_page_map(); // delete page directory and its page tables
drop_vma_list();

#if 0
/*
* This marks all userpages as free. Nevertheless they are still existing
* and used by the MMU until the task finishes. Therefore we need to disable
* context switching by disabling interrupts (see above)! We may also make use
* of the TLB and global kernel pages.
*/
drop_page_map();

#if 1
if (atomic_int32_read(&curr_task->user_usage))
kprintf("Memory leak! Task %d did not release %d pages\n",
curr_task->id, atomic_int32_read(&curr_task->user_usage));
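The comment in the disabled block states the constraint behind this ordering: once the address space is torn down the exiting task keeps running on pages that are formally free, so interrupts stay off to prevent a context switch from handing them out again, and the user_usage counter doubles as a leak detector. A condensed sketch of that bracket; the function name is made up, the calls and field names are the hunk's own.

static void drop_address_space_sketch(task_t* curr_task)
{
	uint32_t flags = irq_nested_disable();   /* no context switch beyond this point */

	drop_vma_list();                         /* release the VMA descriptors */

	/* Whatever pages the task still holds at this point were never unmapped. */
	if (atomic_int32_read(&curr_task->user_usage))
		kprintf("Memory leak! Task %d did not release %d pages\n",
		        curr_task->id, atomic_int32_read(&curr_task->user_usage));

	irq_nested_enable(flags);                /* the real do_exit() reschedules instead */
}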
@@ -266,7 +279,6 @@ static void NORETURN do_exit(int arg) {
curr_task->status = TASK_FINISHED;

// decrease the number of active tasks
flags = irq_nested_disable();
core_id = CORE_ID;
spinlock_irqsave_lock(&runqueues[core_id].lock);
runqueues[core_id].nr_tasks--;
@@ -312,6 +324,7 @@ void NORETURN abort(void) {
static int create_task(tid_t* id, entry_point_t ep, void* arg, uint8_t prio, uint32_t core_id)
{
task_t* curr_task;
task_t* new_task = NULL;
int ret = -ENOMEM;
uint32_t i;

@@ -331,63 +344,73 @@ static int create_task(tid_t* id, entry_point_t ep, void* arg, uint8_t prio, uint32_t core_id)
#endif
{
core_id = CORE_ID;
kprintf("Inavlid core id! Set id to %u!\n", core_id);
kprintf("create_task: invalid core id! Set id to %u!\n", core_id);
}
curr_task = per_core(current_task);

// search free entry in task table
for(i=0; i<MAX_TASKS; i++) {
if (task_table[i].status == TASK_INVALID) {
atomic_int32_set(&task_table[i].user_usage, 0);

ret = create_page_map(task_table+i, 0);
if (ret < 0) {
ret = -ENOMEM;
goto out;
}

task_table[i].id = i;
task_table[i].status = TASK_READY;
task_table[i].last_stack_pointer = NULL;
task_table[i].stack = create_stack();
task_table[i].flags = TASK_DEFAULT_FLAGS;
task_table[i].prio = prio;
task_table[i].last_core = 0;
spinlock_init(&task_table[i].vma_lock);
task_table[i].vma_list = NULL;
task_table[i].fildes_table = NULL;
mailbox_wait_msg_init(&task_table[i].inbox);
memset(task_table[i].outbox, 0x00, sizeof(mailbox_wait_msg_t*)*MAX_TASKS);
task_table[i].outbox[curr_task->id] = &curr_task->inbox;

if (id)
*id = i;

ret = create_default_frame(task_table+i, ep, arg);

task_table[i].start_heap = 0;
task_table[i].end_heap = 0;
task_table[i].lwip_err = 0;
task_table[i].start_tick = get_clock_tick();

// add task in the runqueue
spinlock_irqsave_lock(&runqueues[core_id].lock);
runqueues[core_id].prio_bitmap |= (1 << prio);
runqueues[core_id].nr_tasks++;
if (!runqueues[core_id].queue[prio-1].first) {
task_table[i].next = task_table[i].prev = NULL;
runqueues[core_id].queue[prio-1].first = task_table+i;
runqueues[core_id].queue[prio-1].last = task_table+i;
} else {
task_table[i].prev = runqueues[core_id].queue[prio-1].last;
task_table[i].next = NULL;
runqueues[core_id].queue[prio-1].last->next = task_table+i;
runqueues[core_id].queue[prio-1].last = task_table+i;
}
spinlock_irqsave_unlock(&runqueues[core_id].lock);
new_task = &task_table[i];
break;
}
}

if (BUILTIN_EXPECT(!new_task, 0)) {
ret = -ENOMEM;
goto out;
}

atomic_int32_set(&new_task->user_usage, 0);

ret = copy_page_map(new_task, 0);
if (ret < 0) {
ret = -ENOMEM;
goto out;
}

new_task->id = i;
new_task->status = TASK_READY;
new_task->last_stack_pointer = NULL;
new_task->stack = create_stack();
new_task->flags = TASK_DEFAULT_FLAGS;
new_task->prio = prio;
new_task->last_core = 0;
spinlock_init(&new_task->vma_lock);
new_task->vma_list = NULL;
new_task->fildes_table = NULL;

mailbox_wait_msg_init(&new_task->inbox);
memset(new_task->outbox, 0x00, sizeof(mailbox_wait_msg_t*)*MAX_TASKS);
new_task->outbox[curr_task->id] = &curr_task->inbox;

if (id)
*id = i;

ret = create_default_frame(new_task, ep, arg);

new_task->start_heap = 0;
new_task->end_heap = 0;
new_task->lwip_err = 0;
new_task->start_tick = get_clock_tick();

// add task in the runqueue
spinlock_irqsave_lock(&runqueues[core_id].lock);
runqueues[core_id].prio_bitmap |= (1 << prio);
runqueues[core_id].nr_tasks++;
if (!runqueues[core_id].queue[prio-1].first) {
new_task->next = new_task->prev = NULL;
runqueues[core_id].queue[prio-1].first = new_task;
runqueues[core_id].queue[prio-1].last = new_task;
}
else {
new_task->prev = runqueues[core_id].queue[prio-1].last;
new_task->next = NULL;
runqueues[core_id].queue[prio-1].last->next = new_task;
runqueues[core_id].queue[prio-1].last = new_task;
}
spinlock_irqsave_unlock(&runqueues[core_id].lock);

out:
spinlock_irqsave_unlock(&table_lock);
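The bulk of this hunk is the cleanup named in the commit message: instead of repeating task_table[i].field assignments inside the search loop, the loop now only claims a free slot and records it in new_task, and all initialisation (copy_page_map, mailboxes, runqueue insertion) happens once through that pointer after the loop. A small standalone illustration of the pattern, with toy types that are not the kernel's:

/* Toy version of "claim a slot in the loop, initialise through a pointer
 * afterwards"; slot_t and the table are illustrative only. */
#include <stddef.h>
#include <stdio.h>

#define MAX_SLOTS 8

typedef struct { int used; int id; int value; } slot_t;
static slot_t table[MAX_SLOTS];

static slot_t *claim_and_init(int value)
{
	slot_t *slot = NULL;
	size_t i;

	for (i = 0; i < MAX_SLOTS; i++) {       /* search phase: only claim the slot */
		if (!table[i].used) {
			table[i].used = 1;
			slot = &table[i];
			break;
		}
	}
	if (!slot)
		return NULL;                    /* table full */

	slot->id = (int)i;                      /* init phase: one place only */
	slot->value = value;
	return slot;
}

int main(void)
{
	slot_t *s = claim_and_init(42);
	if (s)
		printf("slot %d holds %d\n", s->id, s->value);
	return 0;
}

With the same shape in create_task(), the error path (goto out) and the initialisation no longer have to be kept in sync inside the loop body.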
@@ -399,85 +422,97 @@ int sys_fork(void)
int ret = -ENOMEM;
unsigned int i, core_id, fd_i;
task_t* parent_task = per_core(current_task);
task_t* child_task = NULL;

spinlock_irqsave_lock(&table_lock);

core_id = CORE_ID;

// search free entry in task_table
for(i=0; i<MAX_TASKS; i++) {
if (task_table[i].status == TASK_INVALID) {
atomic_int32_set(&task_table[i].user_usage, 0);

ret = create_page_map(task_table+i, 1);
if (ret < 0) {
ret = -ENOMEM;
goto out;
}

ret = copy_vma_list(child_task);
if (BUILTIN_EXPECT(!ret, 0)) {
ret = -ENOMEM;
goto out;
}

task_table[i].id = i;
task_table[i].last_stack_pointer = NULL;
task_table[i].stack = create_stack();

// init fildes_table
task_table[i].fildes_table = kmalloc(sizeof(filp_t)*NR_OPEN);
memcpy(task_table[i].fildes_table, parent_task->fildes_table, sizeof(filp_t)*NR_OPEN);
for (fd_i = 0; fd_i < NR_OPEN; fd_i++) {
if ((task_table[i].fildes_table[fd_i]) != NULL)
task_table[i].fildes_table[fd_i]->count++;
}

mailbox_wait_msg_init(&task_table[i].inbox);
memset(task_table[i].outbox, 0x00, sizeof(mailbox_wait_msg_t*)*MAX_TASKS);
task_table[i].outbox[parent_task->id] = &parent_task->inbox;
task_table[i].flags = parent_task->flags;
memcpy(&(task_table[i].fpu), &(parent_task->fpu), sizeof(union fpu_state));
task_table[i].start_tick = get_clock_tick();
task_table[i].start_heap = 0;
task_table[i].end_heap = 0;
task_table[i].lwip_err = 0;
task_table[i].prio = parent_task->prio;
task_table[i].last_core = parent_task->last_core;

// add task in the runqueue
spinlock_irqsave_lock(&runqueues[core_id].lock);
runqueues[core_id].prio_bitmap |= (1 << parent_task->prio);
runqueues[core_id].nr_tasks++;
if (!runqueues[core_id].queue[parent_task->prio-1].first) {
task_table[i].next = task_table[i].prev = NULL;
runqueues[core_id].queue[parent_task->prio-1].first = task_table+i;
runqueues[core_id].queue[parent_task->prio-1].last = task_table+i;
} else {
task_table[i].prev = runqueues[core_id].queue[parent_task->prio-1].last;
task_table[i].next = NULL;
runqueues[core_id].queue[parent_task->prio-1].last->next = task_table+i;
runqueues[core_id].queue[parent_task->prio-1].last = task_table+i;
}
spinlock_irqsave_unlock(&runqueues[core_id].lock);

ret = arch_fork(task_table+i);

if (parent_task != per_core(current_task)) {
// Oh, the current task is the new child task!
// Leave the function without releasing the locks
// because the locks are already released
// by the parent task!
return 0;
}

if (!ret) {
task_table[i].status = TASK_READY;
ret = i;
}
child_task = &task_table[i];
break;
}
}

if (BUILTIN_EXPECT(!child_task, 0)) {
ret = -ENOMEM;
goto out;
}

atomic_int32_set(&child_task->user_usage, 0);

ret = copy_page_map(child_task, 1);
if (ret < 0) {
ret = -ENOMEM;
goto out;
}

ret = copy_vma_list(child_task);
if (BUILTIN_EXPECT(!ret, 0)) {
ret = -ENOMEM;
goto out;
}

child_task->id = i;
child_task->last_stack_pointer = NULL;
child_task->stack = create_stack();

// init fildes_table
child_task->fildes_table = kmalloc(sizeof(filp_t)*NR_OPEN);
memcpy(child_task->fildes_table, parent_task->fildes_table, sizeof(filp_t)*NR_OPEN);
for (fd_i=0; fd_i<NR_OPEN; fd_i++) {
if ((child_task->fildes_table[fd_i]) != NULL)
child_task->fildes_table[fd_i]->count++;
}

// init mailbox
mailbox_wait_msg_init(&child_task->inbox);
memset(child_task->outbox, 0x00, sizeof(mailbox_wait_msg_t*)*MAX_TASKS);
child_task->outbox[parent_task->id] = &parent_task->inbox;

child_task->flags = parent_task->flags;
memcpy(&child_task->fpu, &parent_task->fpu, sizeof(union fpu_state));
child_task->start_tick = get_clock_tick();
child_task->start_heap = 0;
child_task->end_heap = 0;
child_task->lwip_err = 0;
child_task->prio = parent_task->prio;
child_task->last_core = parent_task->last_core;

// add task in the runqueue
spinlock_irqsave_lock(&runqueues[core_id].lock);
runqueues[core_id].prio_bitmap |= (1 << parent_task->prio);
runqueues[core_id].nr_tasks++;
if (!runqueues[core_id].queue[parent_task->prio-1].first) {
child_task->next = child_task->prev = NULL;
runqueues[core_id].queue[parent_task->prio-1].first = child_task;
runqueues[core_id].queue[parent_task->prio-1].last = child_task;
}
else {
child_task->prev = runqueues[core_id].queue[parent_task->prio-1].last;
child_task->next = NULL;
runqueues[core_id].queue[parent_task->prio-1].last->next = child_task;
runqueues[core_id].queue[parent_task->prio-1].last = child_task;
}
spinlock_irqsave_unlock(&runqueues[core_id].lock);

ret = arch_fork(child_task);

if (parent_task != per_core(current_task))
/*
* Oh, the current task is the new child task!
* Leave the function without releasing the locks
* because the locks are already released by the parent task!
*/
return 0;

if (!ret) {
child_task->status = TASK_READY;
ret = i;
}

out:
spinlock_irqsave_unlock(&table_lock);
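sys_fork() gives the child its own file-descriptor table instead of sharing the parent's: the table is copied and every open entry's count is bumped, so a close() in either task releases the underlying file only after both have dropped it. A standalone miniature of that copy; the toy filp_t and the NR_OPEN value are assumptions, not the kernel's definitions.

/* Miniature of the fildes_table duplication above: copy the pointer array
 * and bump each open file's reference count (types simplified). */
#include <stdlib.h>
#include <string.h>

#define NR_OPEN 16                 /* assumed table size */

typedef struct { int count; /* ... */ } filp_t;

static filp_t **copy_fd_table(filp_t **parent)
{
	filp_t **child = malloc(sizeof(filp_t *) * NR_OPEN);
	if (!child)
		return NULL;

	memcpy(child, parent, sizeof(filp_t *) * NR_OPEN);

	for (int fd = 0; fd < NR_OPEN; fd++)
		if (child[fd])
			child[fd]->count++;   /* file is now referenced by both tasks */

	return child;
}

int main(void)
{
	static filp_t file = { .count = 1 };
	filp_t *parent[NR_OPEN] = { [0] = &file };
	filp_t **child = copy_fd_table(parent);

	/* the shared file is now referenced twice */
	return (child && file.count == 2) ? 0 : 1;
}

The check of per_core(current_task) right after arch_fork() is the other subtlety the hunk's comment spells out: the child resumes inside this very function and must return immediately, because the locks were already released on its behalf by the parent.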