- merge the current eduOS branch into MetalSVM

=> add support for "real" user-space applications, which can be loaded via initrd
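
The resulting launch path goes through the new create_user_task()/user_entry() pair: the binary is looked up in the initrd via the VFS, user_entry() parses the ELF image, maps its PT_LOAD segments and a user-level stack, and finally switches to ring 3 through jump_to_user_code(). The wrapper below is a hypothetical usage sketch, not part of this commit, and the <metalsvm/tasks.h> header name is an assumption.

/* Hypothetical sketch -- not part of this commit. Assumes the new
 * create_user_task() prototype is exported via <metalsvm/tasks.h>. */
#include <metalsvm/tasks.h>

static int start_hello(void)
{
        tid_t id;
        char* argv[] = { "hello", NULL };

        /* "/bin/hello" has to be packed into the mounted initrd; 8192 is the
         * stack-size argument, matching the call added to test_init() below. */
        return create_user_task(&id, 8192, "/bin/hello", 1, argv);
}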


git-svn-id: http://svn.lfbs.rwth-aachen.de/svn/scc/trunk/MetalSVM@394 315a16e6-25f9-4109-90ae-ca3045a26c18
stefan 2011-02-08 18:37:56 +00:00
parent 98769944c8
commit 9a9df5ed2e
13 changed files with 141 additions and 153 deletions

View file

@@ -114,6 +114,19 @@ typedef struct {
uint32_t alignment;
} __attribute__ ((packed)) elf_program_header_t;
typedef struct {
uint32_t name;
uint32_t type;
uint32_t flags;
uint32_t addr;
uint32_t offset;
uint32_t size;
uint32_t link;
uint32_t info;
uint32_t align;
uint32_t enttry_size;
} __attribute__ ((packed)) elf_section_header_t;
#ifdef __cplusplus
}
#endif

View file

@@ -24,8 +24,9 @@
#include <metalsvm/stddef.h>
#include <metalsvm/tasks_types.h>
int create_default_frame(task_t* task, entry_point_t ep, void* arg, int user);
int create_default_frame(task_t* task, entry_point_t ep, void* arg);
int register_task(task_t* task);
int jump_to_user_code(uint32_t ep, uint32_t stack);
void reschedule(void);
#endif

View file

@@ -53,25 +53,19 @@ int register_task(task_t* task) {
return 0;
}
int create_default_frame(task_t* task, entry_point_t ep, void* arg, int user)
int create_default_frame(task_t* task, entry_point_t ep, void* arg)
{
uint16_t cs = user ? 0x1B : 0x08;
uint16_t ds = user ? 0x23 : 0x10;
uint16_t cs = 0x08;
uint16_t ds = 0x10;
uint32_t id;
if (BUILTIN_EXPECT(!task, 0))
return -1;
if (BUILTIN_EXPECT(user && !task->ustack, 0))
return -1;
if (BUILTIN_EXPECT(user && !task->stack_size, 0))
return -1;
id = task->id;
/* reset buffers */
memset(task_state_segments+id, 0x00, sizeof(tss_t));
memset(kstacks[id], 0xCD, KERNEL_STACK_SIZE);
if (user)
memset(task->ustack, 0XCD, task->stack_size);
/* set default values of all registers */
task_state_segments[id].cs = cs;
@@ -83,11 +77,7 @@ int create_default_frame(task_t* task, entry_point_t ep, void* arg, int user)
task_state_segments[id].eflags = 0x1202;
task_state_segments[id].cr3 = (uint32_t) (task->pgd);
task_state_segments[id].eip = (uint32_t) ep;
if (user) {
task_state_segments[id].esp = (uint32_t) task->ustack + task->stack_size - sizeof(size_t);
} else {
task_state_segments[id].esp = (uint32_t) kstacks[id] + KERNEL_STACK_SIZE - sizeof(size_t);
}
task_state_segments[id].esp = (uint32_t) kstacks[id] + KERNEL_STACK_SIZE - sizeof(size_t);
/* build default stack frame */
*((size_t*)task_state_segments[id].esp) = 0xDEADBEAF; /* dead-end */
@@ -95,17 +85,20 @@ int create_default_frame(task_t* task, entry_point_t ep, void* arg, int user)
task_state_segments[id].esp -= sizeof(size_t);
*((size_t*)task_state_segments[id].esp) = (size_t) arg;
task_state_segments[id].esp -= sizeof(size_t);
if (user)
*((size_t*)task_state_segments[id].esp) = (size_t) leave_user_task;
else
*((size_t*)task_state_segments[id].esp) = (size_t) leave_kernel_task;
*((size_t*)task_state_segments[id].esp) = (size_t) leave_kernel_task;
/* setup for the kernel stack frame */
task_state_segments[id].ss0 = 0x10;
if (user)
task_state_segments[id].esp0 = (uint32_t) kstacks[id] + KERNEL_STACK_SIZE - sizeof(size_t);
else
task_state_segments[id].esp0 = 0; //task_state_segments[id].esp;
task_state_segments[id].esp0 = task_state_segments[id].esp;
return 0;
}
int jump_to_user_code(uint32_t ep, uint32_t stack)
{
asm volatile ("mov %0, %%ds; mov %0, %%fs; mov %0, %%gs; mov %0, %%es" :: "r"(0x23));
asm volatile ("push $0x23; push %0; push $0x1B; push %1" :: "r"(stack), "r"(ep));
asm volatile ("lret");
return 0;
}
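
A note on the switch above (a sketch of the mechanics, not additional commit code): 0x1B and 0x23 are the user code and data selectors, i.e. GDT entries 3 and 4 with requested privilege level 3, and a far return across a privilege boundary pops EIP, CS, ESP and SS, so the four pushes build exactly the frame that lret consumes. The SELECTOR macro below is illustrative only.

/* Sketch only: selector encoding and stack layout assumed by jump_to_user_code().
 * A selector is (GDT index << 3) | RPL, so 0x1B = entry 3, RPL 3 (user code)
 * and 0x23 = entry 4, RPL 3 (user data). */
#define SELECTOR(index, rpl)    (((index) << 3) | (rpl))

/* Stack as seen by the final lret:
 *
 *   0x23   user SS   (pushed first)
 *   stack  user ESP
 *   0x1B   user CS
 *   ep     user EIP  (top of stack)
 *
 * lret pops EIP and CS; since the new CS has RPL 3 while the CPU runs at
 * CPL 0, it also pops ESP and SS, which completes the drop to ring 3. */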
@@ -182,11 +175,11 @@ void gdt_install(void)
GDT_FLAG_4K_GRAN | GDT_FLAG_32_BIT);
/*
* Create a TSS for each task (we use these segments for task switching)
* Create a TSS for each task at ring 0 (we use these segments for task switching)
*/
for(i=0; i<MAX_TASKS; i++) {
gdt_set_gate(5+i, (unsigned long) (task_state_segments+i), sizeof(tss_t)-1,
GDT_FLAG_PRESENT | GDT_FLAG_TSS | GDT_FLAG_RING3,
GDT_FLAG_PRESENT | GDT_FLAG_TSS | GDT_FLAG_RING0,
GDT_FLAG_32_BIT);
}

View file

@@ -119,9 +119,6 @@ size_t map_region(task_t* task, size_t viraddr, size_t phyaddr, uint32_t npages,
if (BUILTIN_EXPECT(!paging_enabled && (viraddr != phyaddr), 0))
return 0;
if (!(flags & MAP_KERNEL_SPACE))
return 0;
if (!viraddr) {
viraddr = vm_alloc(task, npages, flags);
if (BUILTIN_EXPECT(!viraddr, 0)) {
@@ -146,7 +143,10 @@ size_t map_region(task_t* task, size_t viraddr, size_t phyaddr, uint32_t npages,
}
// set the new page table into the directory
task->pgd->entries[index] = (uint32_t)pgt|KERN_TABLE;
if (flags & MAP_USER_SPACE)
task->pgd->entries[index] = (uint32_t)pgt|USER_TABLE;
else
task->pgd->entries[index] = (uint32_t)pgt|KERN_TABLE;
// if paging is already enabled, we need to use the virtual address
if (paging_enabled)
@@ -183,7 +183,11 @@ size_t map_region(task_t* task, size_t viraddr, size_t phyaddr, uint32_t npages,
return 0;
}
pgt->entries[index] = KERN_PAGE|(phyaddr & 0xFFFFF000);
if (flags & MAP_USER_SPACE)
pgt->entries[index] = USER_PAGE|(phyaddr & 0xFFFFF000);
else
pgt->entries[index] = KERN_PAGE|(phyaddr & 0xFFFFF000);
if (flags & MAP_NO_CACHE)
pgt->entries[index] |= PG_PCD;
@@ -333,7 +337,7 @@ int arch_paging_init(void)
index2 = (viraddr >> 12) & 0x3FF;
// now, we create a self reference
per_core(current_task)->pgd->entries[index1] = (((size_t) pgt) & 0xFFFFF000)|USER_TABLE;
per_core(current_task)->pgd->entries[index1] = (((size_t) pgt) & 0xFFFFF000)|KERN_TABLE;
pgt->entries[index2] = ((size_t) pgt & 0xFFFFF000)|KERN_PAGE;
/*

View file

@@ -40,7 +40,6 @@ void NORETURN abort(void);
void* kmalloc(size_t);
void* mem_allocation(size_t sz, uint32_t flags);
void kfree(void*, size_t);
void* create_stack(task_t* task, size_t sz);
#ifdef __cplusplus
}

View file

@@ -42,9 +42,6 @@ struct spinlock;
typedef struct task {
tid_t id; /* task id = position in the task table */
uint32_t status;
unsigned char* ustack; /* stack of an user level task */
size_t stack_size; /* only user level tasks
* are able to specify its stack size */
atomic_int32_t mem_usage; /* in number of pages */
struct spinlock* pgd_lock; /* avoids concurrent access to the page directory */
struct page_dir* pgd; /* pointer to the page directory */

View file

@@ -32,7 +32,7 @@
#include <asm/elf.h>
DEFINE_PER_CORE(task_t*, current_task, NULL);
static task_t task_table[MAX_TASKS] = {[0 ... MAX_TASKS-1] = {0, TASK_INVALID, NULL, 0, ATOMIC_INIT(0), NULL, NULL}};
static task_t task_table[MAX_TASKS] = {[0 ... MAX_TASKS-1] = {0, TASK_INVALID, ATOMIC_INIT(0), NULL, NULL}};
static spinlock_t table_lock = SPINLOCK_INIT;
/*
@@ -50,9 +50,10 @@ int multitasking_init(void) {
if (task_table[i].status == TASK_INVALID) {
task_table[i].id = i;
task_table[i].status = TASK_RUNNING;
atomic_int32_set(&task_table[i].mem_usage, 0);
memset(task_table[i].mbox, 0x00, sizeof(mailbox_int32_t*)*MAX_TASKS);
per_core(current_task) = task_table+i;
get_kernel_pgd(per_core(current_task));
get_kernel_pgd(task_table+i);
return 0;
}
}
@@ -81,8 +82,6 @@ static void NORETURN do_exit(int arg) {
kprintf("Terminate task: %u, return value %d\n", per_core(current_task)->id, arg);
wakeup_blocked_tasks(arg);
if (per_core(current_task)->ustack)
kfree(per_core(current_task)->ustack, per_core(current_task)->stack_size);
if (atomic_int32_read(&per_core(current_task)->mem_usage))
kprintf("Memory leak! Task %d did not release %d bytes\n", per_core(current_task)->id, atomic_int32_read(&per_core(current_task)->mem_usage));
per_core(current_task)->status = TASK_FINISHED;
@@ -101,19 +100,6 @@ void NORETURN leave_kernel_task(void) {
do_exit(result);
}
void NORETURN leave_user_task(void) {
int result;
result = get_return_value();
SYSCALL1(__NR_exit, result);
kprintf("Kernel panic! Task %d comes back from syscall \"exit\"\n", per_core(current_task)->id);
while(1) {
NOP8;
}
}
void NORETURN sys_exit(int arg)
{
do_exit(arg);
@@ -123,7 +109,7 @@ void NORETURN abort(void) {
do_exit(-1);
}
static int create_task(tid_t* id, entry_point_t ep, void* arg, size_t stack_size, int user)
static int create_task(tid_t* id, entry_point_t ep, void* arg)
{
int ret = -ENOMEM;
unsigned int i;
@@ -131,36 +117,20 @@ static int create_task(tid_t* id, entry_point_t ep, void* arg, size_t stack_size
if (BUILTIN_EXPECT(!ep, 0))
return -EINVAL;
if (user && !stack_size)
stack_size = DEFAULT_STACK_SIZE;
spinlock_lock_irqsave(&table_lock);
for(i=0; i<MAX_TASKS; i++) {
if (task_table[i].status == TASK_INVALID) {
task_table[i].id = i;
task_table[i].status = TASK_READY;
if (user) {
task_table[i].ustack = create_stack(task_table+i, stack_size);
if (!task_table[i].ustack)
break;
task_table[i].stack_size = stack_size;
task_table[i].pgd = NULL;
task_table[i].pgd_lock = NULL;
} else {
task_table[i].ustack = NULL;
task_table[i].stack_size = 0;
get_kernel_pgd(task_table+i);
}
get_kernel_pgd(task_table+i);
atomic_int32_set(&task_table[i].mem_usage, 0);
memset(task_table[i].mbox, 0x00, sizeof(mailbox_int32_t*)*MAX_TASKS);
if (id)
*id = i;
ret = create_default_frame(task_table+i, ep, arg, user);
ret = create_default_frame(task_table+i, ep, arg);
break;
}
}
@@ -172,51 +142,109 @@ static int create_task(tid_t* id, entry_point_t ep, void* arg, size_t stack_size
int create_kernel_task(tid_t* id, entry_point_t ep, void* arg)
{
return create_task(id, ep, arg, 0, 0);
return create_task(id, ep, arg);
}
int create_user_task(tid_t* id, size_t sz, const char* filename, int argc, char** argv)
static int STDCALL user_entry(void* arg)
{
vfs_node_t* node;
uint32_t i, addr, npages;
vfs_node_t* node = (vfs_node_t*) arg;
elf_header_t header;
page_dir_t* pgd;
elf_program_header_t prog_header;
elf_section_header_t sec_header;
node = findnode_fs(filename);
if (node && (node->type == FS_FILE)) {
kprintf("Found application %s at node %p\n", filename, node);
if (!node)
return -EINVAL;
read_fs(node, (uint8_t*)&header, sizeof(elf_header_t), 0);
if (BUILTIN_EXPECT(header.ident.magic != ELF_MAGIC, 0))
return -EINVAL;
read_fs(node, (uint8_t*)&header, sizeof(elf_header_t), 0);
if (BUILTIN_EXPECT(header.ident.magic != ELF_MAGIC, 0))
goto invalid;
if (BUILTIN_EXPECT(header.type != ELF_ET_EXEC, 0))
return -EINVAL;
if (BUILTIN_EXPECT(header.type != ELF_ET_EXEC, 0))
goto invalid;
if (BUILTIN_EXPECT(header.machine != ELF_EM_386, 0))
return -EINVAL;
if (BUILTIN_EXPECT(header.machine != ELF_EM_386, 0))
goto invalid;
if (BUILTIN_EXPECT(header.ident._class != ELF_CLASS_32, 0))
return -EINVAL;
if (BUILTIN_EXPECT(header.ident._class != ELF_CLASS_32, 0))
goto invalid;
if (BUILTIN_EXPECT(header.ident.data != ELF_DATA_2LSB, 0))
return -EINVAL;
if (BUILTIN_EXPECT(header.ident.data != ELF_DATA_2LSB, 0))
goto invalid;
kprintf("entry point at 0x%x\n", header.entry);
if (header.entry <= KERNEL_SPACE)
goto invalid;
pgd = create_pgd();
if (!pgd)
return -ENOMEM;
// interpret program header table
for (i=0; i<header.ph_entry_count; i++) {
if (read_fs(node, (uint8_t*)&prog_header, sizeof(elf_program_header_t), header.ph_offset+i*header.ph_entry_size) == 0) {
kprintf("Could not read programm header!\n");
continue;
}
return 0;
if (prog_header.type == ELF_PT_LOAD) { // load program segment
if (!prog_header.virt_addr)
continue;
npages = (prog_header.mem_size / PAGE_SIZE);
if (prog_header.mem_size % PAGE_SIZE)
npages++;
addr = get_pages(npages);
// map page frames in the address space of the current task
if (!map_region(per_core(current_task), prog_header.virt_addr, addr, npages, MAP_USER_SPACE))
kprintf("Could not map 0x%x at 0x%x\n", addr, prog_header.virt_addr);
// clear pages
memset((void*) prog_header.virt_addr, 0, npages*PAGE_SIZE);
// load program
read_fs(node, (uint8_t*)prog_header.virt_addr, prog_header.file_size, prog_header.offset);
}
}
// create user-level stack
npages = DEFAULT_STACK_SIZE / PAGE_SIZE;
if (DEFAULT_STACK_SIZE % PAGE_SIZE)
npages++;
addr = get_pages(npages);
if (!map_region(per_core(current_task), header.entry*2, addr, npages, MAP_USER_SPACE)) {
kprintf("Could not map stack at 0x%x\n", header.entry*2);
return -ENOMEM;
}
memset((void*) (header.entry*2), 0, npages*PAGE_SIZE);
// interpret section header table
for (i=0; i<header.sh_entry_count; i++) {
if (read_fs(node, (uint8_t*)&sec_header, sizeof(elf_section_header_t), header.sh_offset+i*header.sh_entry_size) == 0) {
kprintf("Could not read section header!\n");
continue;
}
// TODO: set page permissions
}
jump_to_user_code(header.entry, header.entry*2+npages*PAGE_SIZE-64);
return 0;
invalid:
kprintf("Invalid executable!\n");
return -EINVAL;
}
/*int create_user_task(tid_t* id, entry_point_t ep, void* arg, size_t stack_size)
int create_user_task(tid_t* id, size_t sz, const char* fname, int argc, char** argv)
{
return create_task(id, ep, arg, stack_size, 1);
}*/
vfs_node_t* node;
node = findnode_fs((char*) fname);
if (!node || !(node->type == FS_FILE))
return -EINVAL;
return create_task(id, user_entry, node);
}
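
One detail of the ELF loader above (user_entry()) worth spelling out: mem_size of a PT_LOAD segment may exceed file_size because it also covers zero-initialised data (.bss), which is why the loader rounds up to whole pages, clears the mapped region and then reads only file_size bytes from the image. A hypothetical helper, equivalent to the explicit division/modulo rounding used for the segments and the user stack, would be:

/* Hypothetical helper -- not in the commit -- equivalent to the
 * division/modulo rounding used for PT_LOAD segments and the user stack. */
static inline uint32_t pages_for(uint32_t size)
{
        return (size + PAGE_SIZE - 1) / PAGE_SIZE;      /* round up to full pages */
}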
int join_task(tid_t id, int* result)
{

View file

@@ -109,7 +109,7 @@ int test_init(void)
//create_kernel_task(&id2, join_test, NULL);
//create_kernel_task(&id3, producer, NULL);
//create_kernel_task(&id4, consumer, NULL);
//create_user_task(&id5, 8192, "/bin/hello", 1, argv);
create_user_task(&id5, 8192, "/bin/hello", 1, argv);
return 0;
}

View file

@@ -1,5 +1,6 @@
C_source = init.c chown.c environ.c execve.c fork.c fstat.c getpid.c gettod.c isatty.c link.c sbrk.c stat.c symlink.c times.c unlink.c wait.c kill.c lseek.c open.c close.c write.c read.c _exit.c errno.c
ASM_source =
ASM_FLAGS = -felf32
LIBNAME = libgloss.a
INC = -I../newlib/current/include

View file

@@ -44,7 +44,6 @@ static size_t alloc_start;
atomic_int32_t total_pages = ATOMIC_INIT(0);
atomic_int32_t total_allocated_pages = ATOMIC_INIT(0);
atomic_int32_t total_available_pages = ATOMIC_INIT(0);
atomic_int32_t total_kernel_pages = ATOMIC_INIT(0);
/*
* Note that linker symbols are not variables, they have no memory allocated for
@@ -222,10 +221,7 @@ next_try:
atomic_int32_add(&total_allocated_pages, npages);
atomic_int32_sub(&total_available_pages, npages);
if (task && task->ustack)
atomic_int32_add(&(task->mem_usage), npages);
else
atomic_int32_add(&total_kernel_pages, npages);
atomic_int32_add(&(task->mem_usage), npages);
return ret;
@@ -262,42 +258,7 @@ void* mem_allocation(size_t sz, uint32_t flags)
void* kmalloc(size_t sz)
{
uint32_t flags;
task_t* task = per_core(current_task);
if (task->ustack)
flags = MAP_USER_SPACE|MAP_HEAP;
else
flags = MAP_KERNEL_SPACE|MAP_HEAP;
return mem_allocation(sz, flags);
}
void* create_stack(task_t* task, size_t sz)
{
size_t viraddr;
uint32_t npages = sz / PAGE_SIZE;
if (sz % PAGE_SIZE)
npages++;
spinlock_lock(task->pgd_lock);
size_t addr = (size_t) task_get_pages(task, npages);
if (BUILTIN_EXPECT(!addr, 0))
return 0;
spinlock_lock(task->pgd_lock);
/*
* We need only user-level stacks. Kernel stacks already initialized as
* static array.
*/
viraddr = map_region(task, 0, addr, npages, MAP_USER_SPACE|MAP_STACK);
spinlock_unlock(task->pgd_lock);
memset((unsigned char*)viraddr, 0xCD, sz);
return (void*) viraddr;
return mem_allocation(sz, MAP_KERNEL_SPACE|MAP_HEAP);
}
static void task_free(task_t* task, void* addr, size_t sz)
@@ -321,16 +282,13 @@ static void task_free(task_t* task, void* addr, size_t sz)
for(i=0; i<npages; i++) {
phyaddr = virt_to_phys(task, (size_t) addr+i*PAGE_SIZE);
index = phyaddr / PAGE_SIZE;
page_unmarked(index);
page_clear_mark(index);
}
spinlock_unlock(&bitmap_lock);
atomic_int32_sub(&total_allocated_pages, npages);
atomic_int32_add(&total_available_pages, npages);
if (task && task->ustack)
atomic_int32_sub(&(task->mem_usage), npages);
else
atomic_int32_sub(&total_kernel_pages, npages);
atomic_int32_sub(&(task->mem_usage), npages);
}
void kfree(void* addr, size_t sz)

View file

@@ -1,7 +1,7 @@
MAKE = make
CC = gcc
CFLAGS = -O2 -nostdinc -Wall -fno-builtin -I../current/include -I../../include -I../../arch/x86/include -fno-stack-protector
LDFLGAS = -nostdlib -L../current/lib/
CFLAGS = -m32 -O2 -nostdinc -Wall -fno-builtin -I../current/include -I../../include -I../../arch/x86/include -fno-stack-protector
LDFLGAS = -m32 -nostdlib -L../current/lib/
# other implicit rules
%.o : %.c

View file

@@ -6,12 +6,6 @@ SEARCH_DIR(.)
__DYNAMIC = 0;
phys = 0x40200000;
/*
* Allocate the stack to be at the top of memory, since the stack
* grows down
*/
PROVIDE (__stack = 0x40500000);
/*
* Initialize some symbols to be zero so we can reference them in the
* crt0 without core dumping. These functions are all optional, but

View file

@@ -1,7 +1,7 @@
MAKE = make
CC = gcc
CROSS_COMPILE=i386-unknown-linux-gnu-gcc
CFLAGS = -O2 -Wall
CFLAGS = -m32 -O2 -Wall
LDFLGAS =
DEFINES=
ASM = nasm