add some debug messages... by initializing the file descriptor table early, we avoid a race

This commit is contained in:
Stefan Lankes 2011-09-19 06:37:55 -07:00
parent 663af37539
commit 810c6db0a8

View file

@ -47,7 +47,7 @@
* A task's id will be its position in this array.
*/
static task_t task_table[MAX_TASKS] = { \
[0] = {0, TASK_IDLE, 0, 0, 0, NULL, NULL, 0, ATOMIC_INIT(0), SPINLOCK_INIT, NULL, SPINLOCK_INIT, NULL, NULL, 0, 0, 0, 0}, \
[0] = {0, TASK_IDLE, 0, 0, 0, NULL, NULL, 0, ATOMIC_INIT(0), SPINLOCK_INIT, NULL, SPINLOCK_INIT, NULL, NULL, 0, 0, 0, 0}, \
[1 ... MAX_TASKS-1] = {0, TASK_INVALID, 0, 0, 0, NULL, NULL, 0, ATOMIC_INIT(0), SPINLOCK_INIT, NULL, SPINLOCK_INIT, NULL, NULL, 0, 0, 0, 0}};
static spinlock_irqsave_t table_lock = SPINLOCK_IRQSAVE_INIT;
@ -283,6 +283,7 @@ static int create_task(tid_t* id, internal_entry_point_t ep, void* arg, uint8_t
task_table[i].last_core = 0;
spinlock_init(&task_table[i].vma_lock);
task_table[i].vma_list = NULL;
task_table[i].fildes_table = NULL;
mailbox_wait_msg_init(&task_table[i].inbox);
memset(task_table[i].outbox, 0x00, sizeof(mailbox_wait_msg_t*)*MAX_TASKS);
task_table[i].outbox[curr_task->id] = &curr_task->inbox;
@ -302,11 +303,13 @@ static int create_task(tid_t* id, internal_entry_point_t ep, void* arg, uint8_t
runqueues[core_id].prio_bitmap |= (1 << prio);
runqueues[core_id].nr_tasks++;
if (!runqueues[core_id].queue[prio-1].first) {
//kprintf("add task %d at the begin of queue %d\n", i, prio-1);
task_table[i].prev = NULL;
runqueues[core_id].queue[prio-1].first = task_table+i;
runqueues[core_id].queue[prio-1].last = task_table+i;
task_table[i].next = NULL;
} else {
//kprintf("add task %d at the end of queue %d, first task %d\n", i, prio-1, runqueues[core_id].queue[prio-1].first->id);
task_table[i].prev = runqueues[core_id].queue[prio-1].last;
runqueues[core_id].queue[prio-1].last->next = task_table+i;
runqueues[core_id].queue[prio-1].last = task_table+i;
@ -519,6 +522,19 @@ static int load_task(load_args_t* largs)
if (!file->node)
return -EINVAL;
/* init fildes_table */
spinlock_irqsave_lock(&table_lock);
task_table[curr_task->id].fildes_table = kmalloc(sizeof(fildes_t)*NR_OPEN);
if (BUILTIN_EXPECT(!task_table[curr_task->id].fildes_table, 0)) {
spinlock_irqsave_unlock(&table_lock);
return -ENOMEM;
}
memset(task_table[curr_task->id].fildes_table, 0x00, sizeof(fildes_t)*NR_OPEN);
task_table[curr_task->id].fildes_table[0].node = findnode_fs("/dev/stdin");
task_table[curr_task->id].fildes_table[1].node = findnode_fs("/dev/stdout");
task_table[curr_task->id].fildes_table[2].node = findnode_fs("/dev/stderr");
spinlock_irqsave_unlock(&table_lock);
err = read_fs(file, (uint8_t*)&header, sizeof(elf_header_t));
if (err < 0) {
kprintf("read_fs failed: %d\n", err);
@ -734,7 +750,7 @@ static int STDCALL user_entry(void* arg)
int create_user_task(tid_t* id, const char* fname, char** argv)
{
vfs_node_t* node;
int argc = 0, ret = 0;
int argc = 0;
size_t i, buffer_size = 0;
load_args_t* load_args = NULL;
char *dest, *src;
@ -769,24 +785,8 @@ int create_user_task(tid_t* id, const char* fname, char** argv)
}
/*
 * If the 'tid_t' id is not initialized, create_task will not set 'tid_t id'.
 * We need the tid to initialize the fildes table, so we have to check this.
 */
if (!id)
id = kmalloc(sizeof(tid_t));
/* == === create new task === == */
ret = create_task(id, user_entry, load_args, NORMAL_PRIO);
/* init fildes_table */
task_table[*id].fildes_table = kmalloc(sizeof(fildes_t)*NR_OPEN);
memset(task_table[*id].fildes_table, 0x00, sizeof(fildes_t)*NR_OPEN);
task_table[*id].fildes_table[0].node = findnode_fs("/dev/stdin");
task_table[*id].fildes_table[1].node = findnode_fs("/dev/stdout");
task_table[*id].fildes_table[2].node = findnode_fs("/dev/stderr");
return ret;
/* create new task */
return create_task(id, user_entry, load_args, NORMAL_PRIO);
}
/** @brief Used by the execve-Systemcall */
@ -1224,8 +1224,11 @@ void scheduler(void)
curr_task->last_core = core_id;
/* signalizes that this task could be reused */
if (curr_task->status == TASK_FINISHED)
if (curr_task->status == TASK_FINISHED) {
curr_task->status = TASK_INVALID;
//kprintf("finished task %d, runqueues[%d].idle->id = %d\n", curr_task->id, core_id, runqueues[core_id].idle->id);
//kprintf("msb %d\n", msb(runqueues[core_id].prio_bitmap));
}
/* if the task is using the FPU, we need to save the FPU context */
if (curr_task->flags & TASK_FPU_USED) {
@ -1295,6 +1298,16 @@ void scheduler(void)
}
curr_task = per_core(current_task) = runqueues[core_id].queue[prio-1].first;
/*if (curr_task->status == TASK_INVALID) {
task_t* tmp = curr_task;
kprintf("Upps..., got invalid task %d, orig task %d\n", curr_task->id, orig_task->id);
//kputs("Dump all tasks: ");
//while (tmp != NULL) {
// kprintf("%d ", tmp->id);
// tmp = tmp->next;
//}
//kputs("\n");
}*/
curr_task->status = TASK_RUNNING;
// remove new task from queue