fixed sys_fork and sys_execve for new paging code

parent 314fa85389
commit 273137f2e7

3 changed files with 72 additions and 23 deletions
@@ -407,13 +407,6 @@ gdt_flush:
     lgdt [gp]
     ret
-
-; determines the current instruction pointer (after the jmp)
-global read_eip
-read_eip:
-    pop rax     ; Get the return address
-    jmp rax     ; Return. Can't use RET because return
-                ; address popped off the stack.
 
 ; In just a few pages in this tutorial, we will add our Interrupt
 ; Service Routines (ISRs) right here!
 global isr0
@@ -59,7 +59,7 @@ int arch_fork(task_t* task)
 {
     struct state* state;
     task_t* curr_task = per_core(current_task);
-    size_t esp, state_size;
+    size_t state_size;
 
     if (BUILTIN_EXPECT(!task, 0))
         return -EINVAL;
@@ -78,6 +78,7 @@ int arch_fork(task_t* task)
     memcpy(task->stack, curr_task->stack, KERNEL_STACK_SIZE);
 
 #ifdef CONFIG_X86_32
+    size_t esp;
     asm volatile ("mov %%esp, %0" : "=m"(esp));
     esp -= (size_t) curr_task->stack;
     esp += (size_t) task->stack;
@@ -107,8 +108,48 @@ int arch_fork(task_t* task)
     // This will be the entry point for the new task. read_ip cleanups the stack
     asm volatile ("push %0; call read_ip" :: "r"(&state->eip) : "%eax");
 #else
-#warning Currently, not supported!
-    return -1;
+    size_t rsp;
+    asm volatile ("mov %%rsp, %0" : "=m"(rsp));
+    rsp -= (size_t) curr_task->stack;
+    rsp += (size_t) task->stack;
+
+    state = (struct state*) (rsp - state_size);
+    //memset(state, 0x00, state_size);
+
+    asm volatile ("push %rax");
+    asm volatile ("push %rcx");
+    asm volatile ("push %rdx");
+    asm volatile ("push %rbx");
+    asm volatile ("push %rbp");
+    asm volatile ("push %rsi");
+    asm volatile ("push %rdi");
+    asm volatile ("push %r8");
+    asm volatile ("push %r9");
+    asm volatile ("push %r10");
+    asm volatile ("push %r11");
+
+    asm volatile ("pop %0" : "=m"(state->r11));
+    asm volatile ("pop %0" : "=m"(state->r10));
+    asm volatile ("pop %0" : "=m"(state->r9));
+    asm volatile ("pop %0" : "=m"(state->r8));
+    asm volatile ("pop %0" : "=m"(state->rdi));
+    asm volatile ("pop %0" : "=m"(state->rsi));
+    asm volatile ("pop %0" : "=m"(state->rbp));
+    asm volatile ("pop %0" : "=m"(state->rbx));
+    asm volatile ("pop %0" : "=m"(state->rdx));
+    asm volatile ("pop %0" : "=m"(state->rcx));
+    asm volatile ("pop %0" : "=m"(state->rax));
+
+    state->rsp = rsp;
+    task->last_stack_pointer = (size_t*) state;
+    state->int_no = 0xB16B00B5;
+    state->error = 0xC03DB4B3;
+    state->cs = 0x08;
+    state->ss = 0x10;
+    asm volatile ("pushf; pop %0" : "=m"(state->rflags)); // store the current RFLAGS
+    asm volatile ("leaq (%%rip), %0;": "=r"(state->rip)); // store current instruction pointer
+
+    state->rflags |= (1 << 9); // enable interrupts
 #endif
 
     return 0;
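The x86_64 branch added above snapshots the parent's registers into a struct state placed just below the top of the copied kernel stack, fills in the pseudo interrupt frame (int_no, error, cs, ss, rflags, rip) that the scheduler later restores for the child, and forces the IF bit (bit 9 of RFLAGS) so the child starts with interrupts enabled. The matching push/pop pairs rely on the compiler not moving RSP between the individual asm statements. A minimal sketch of the frame layout this implies; the field names come from the diff, but the ordering and any further members are assumptions, the authoritative definition lives in the architecture headers:

#include <stdint.h>

/* Hypothetical reconstruction of the x86_64 register frame written by
 * arch_fork() above. Field names mirror the diff; ordering and padding
 * are assumptions, not the kernel's real header. */
struct state {
    uint64_t r11, r10, r9, r8;          /* caller-saved scratch registers   */
    uint64_t rdi, rsi, rbp;             /* argument and frame registers     */
    uint64_t rbx, rdx, rcx, rax;        /* remaining general purpose regs   */
    uint64_t rsp;                       /* child's kernel stack pointer     */
    uint64_t int_no;                    /* marker 0xB16B00B5 (no real IRQ)  */
    uint64_t error;                     /* marker 0xC03DB4B3 (no real code) */
    uint64_t rip, cs, rflags, ss;       /* iretq-style return frame         */
};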
@@ -266,7 +307,7 @@ void gdt_install(void)
     gdt_set_gate(2, 0, limit,
         GDT_FLAG_RING0 | GDT_FLAG_SEGMENT | GDT_FLAG_DATASEG | GDT_FLAG_PRESENT,
         GDT_FLAG_4K_GRAN | mode);
 
     /*
      * Create code segement for userspace applications (ring 3)
      */
@@ -258,13 +258,6 @@ static void NORETURN do_exit(int arg) {
     flags = irq_nested_disable();
 
     drop_vma_list();
-
-    /*
-     * This marks all userpages as free. Nevertheless they are still existing
-     * and used by the MMU until the task finishes. Therefore we need to disable
-     * context switching by disabling interrupts (see above)! We may also make use
-     * of the TLB and global kernel pages.
-     */
     drop_page_map();
 
 #if 1
@@ -368,13 +361,17 @@ static int create_task(tid_t* id, entry_point_t ep, void* arg, uint8_t prio, uin
     new_task->id = i;
     new_task->status = TASK_READY;
     new_task->last_stack_pointer = NULL;
-    new_task->stack = create_stack();
     new_task->flags = TASK_DEFAULT_FLAGS;
     new_task->prio = prio;
     new_task->last_core = 0;
     spinlock_init(&new_task->vma_lock);
     new_task->vma_list = NULL;
     new_task->fildes_table = NULL;
+    new_task->stack = create_stack();
+    if (BUILTIN_EXPECT(!new_task->stack, 0)) {
+        ret = -ENOMEM;
+        goto out;
+    }
 
     mailbox_wait_msg_init(&new_task->inbox);
     memset(new_task->outbox, 0x00, sizeof(mailbox_wait_msg_t*)*MAX_TASKS);
@@ -435,16 +432,18 @@ int sys_fork(void)
         goto out;
     }
 
+    kprintf("sys_fork: parent id = %u, child id = %u\n", parent_task->id , child_task->id); // TODO: remove
+
     atomic_int32_set(&child_task->user_usage, 0);
 
     ret = copy_page_map(child_task, 1);
-    if (ret < 0) {
+    if (BUILTIN_EXPECT(ret < 0, 0)) {
         ret = -ENOMEM;
         goto out;
     }
 
     ret = copy_vma_list(child_task);
-    if (BUILTIN_EXPECT(!ret, 0)) {
+    if (BUILTIN_EXPECT(ret < 0, 0)) {
        ret = -ENOMEM;
        goto out;
     }
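Both error checks in sys_fork now treat only negative return values as failures: the old copy_vma_list test (!ret) took the error path on the success value 0, and copy_page_map gains the same BUILTIN_EXPECT branch hint as the surrounding code. A minimal sketch of the pattern, assuming BUILTIN_EXPECT wraps GCC's __builtin_expect (the real macro and copy functions live in the kernel's headers; the stub below is hypothetical):

#include <errno.h>

/* Assumed definition: mark the error branch as unlikely for the compiler. */
#define BUILTIN_EXPECT(expr, value) __builtin_expect((expr), (value))

/* Hypothetical stand-in for copy_vma_list(): 0 on success, negative on error. */
static int copy_vma_list_stub(void* child_task)
{
    (void) child_task;
    return 0;
}

static int fork_copy_example(void* child_task)
{
    int ret = copy_vma_list_stub(child_task);

    /* Testing ret < 0 (instead of !ret) no longer misreads the success
     * value 0 as a failure; the second argument 0 hints "rarely taken". */
    if (BUILTIN_EXPECT(ret < 0, 0))
        return -ENOMEM;

    return 0;
}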
@@ -452,6 +451,10 @@ int sys_fork(void)
     child_task->id = i;
     child_task->last_stack_pointer = NULL;
     child_task->stack = create_stack();
+    if (BUILTIN_EXPECT(!child_task->stack, 0)) {
+        ret = -ENOMEM;
+        goto out;
+    }
 
     // init fildes_table
     child_task->fildes_table = kmalloc(sizeof(filp_t)*NR_OPEN);
@@ -712,7 +715,7 @@ static int load_task(load_args_t* largs)
     memset((void*) stack, 0x00, npages*PAGE_SIZE);
 
     // create vma regions for the user-level stack
-    flags = VMA_CACHEABLE;
+    flags = VMA_CACHEABLE | VMA_USER;
     if (prog_header.flags & PF_R)
         flags |= VMA_READ;
     if (prog_header.flags & PF_W)
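Tagging the region with VMA_USER tells the new paging code to map these pages as user-accessible (user/supervisor bit set) rather than kernel-only. A small sketch of the permission translation this hunk belongs to; the PF_* bits are the standard ELF program-header flags, while the VMA_* values below are placeholders for illustration only and the names beyond those shown in the diff are assumed (the kernel defines the real flags in its VMA header):

#include <stdint.h>

/* Placeholder values for illustration; the kernel's own VMA_* definitions
 * take precedence. PF_* are the standard ELF program-header permission bits. */
#define PF_X           0x1
#define PF_W           0x2
#define PF_R           0x4

#define VMA_READ       (1 << 0)
#define VMA_WRITE      (1 << 1)   /* assumed name */
#define VMA_EXECUTE    (1 << 2)   /* assumed name */
#define VMA_CACHEABLE  (1 << 3)
#define VMA_USER       (1 << 4)

/* Translate ELF segment permissions into flags for a user-space region,
 * mirroring the logic around the hunk above. */
static uint32_t elf_to_vma_flags(uint32_t p_flags)
{
    uint32_t flags = VMA_CACHEABLE | VMA_USER;

    if (p_flags & PF_R)
        flags |= VMA_READ;
    if (p_flags & PF_W)
        flags |= VMA_WRITE;
    if (p_flags & PF_X)
        flags |= VMA_EXECUTE;

    return flags;
}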
@@ -899,6 +902,9 @@ int sys_execve(const char* fname, char** argv, char** env)
     char *dest, *src;
     int ret, argc = 0;
     int envc = 0;
+    task_t* curr_task = per_core(current_task);
+
+    kprintf("sys_execve: fname = %s, argv = %p, env = %p\n", fname, argv, env); // TODO: remove
 
     node = findnode_fs((char*) fname);
     if (!node || !(node->type == FS_FILE))
@@ -941,8 +947,17 @@ int sys_execve(const char* fname, char** argv, char** env)
         while ((*dest++ = *src++) != 0);
     }
 
+
+    spinlock_lock(&curr_task->vma_lock);
+
     // remove old program
-    drop_vma_list();
+    vma_t *vma;
+    for (vma=curr_task->vma_list; vma; vma = vma->next)
+        pfree((void*) vma->start, vma->end - vma->start);
+
+    // TODO: Heap?
+
+    spinlock_unlock(&curr_task->vma_lock);
 
     /*
      * we use a trap gate to enter the kernel
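Rather than calling drop_vma_list(), sys_execve now walks the current task's VMA list under vma_lock and frees the physical frames of the old program directly (the heap is still marked as a TODO). A minimal sketch of that walk with hypothetical stand-ins for vma_t and pfree(); the real type carries more fields and both are declared in the kernel's headers:

#include <stddef.h>

/* Hypothetical minimal vma_t for illustration; the kernel's real type has
 * additional fields (flags, links into the task, ...). */
typedef struct vma {
    size_t start;       /* first address of the region       */
    size_t end;         /* first address behind the region   */
    struct vma* next;   /* singly linked list of regions     */
} vma_t;

/* Assumed kernel primitive: releases the physical pages backing a range. */
extern void pfree(void* addr, size_t size);

/* Free the old program's memory, mirroring the loop added above.
 * The caller is expected to hold the task's vma_lock. */
static void free_old_program(vma_t* vma_list)
{
    vma_t* vma;

    for (vma = vma_list; vma; vma = vma->next)
        pfree((void*) vma->start, vma->end - vma->start);
}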