Task contexts are now switched by software (instead of relying on hardware TSS task switching).

- Cleanup to be done
- Arch_fork does NOT work in this state
- Multiprocessing support not yet implemented
This commit is contained in:
Jacek Galowicz 2012-04-14 00:34:09 +02:00
parent 49533df72e
commit f4a7d6bd3a
5 changed files with 137 additions and 3 deletions

View file

@ -333,7 +333,8 @@ int test_init(void)
mailbox_int32_init(&mbox);
create_kernel_task(NULL, measure_ctx_switch, NULL, NORMAL_PRIO);
//create_kernel_task(NULL, foo, "Hello from foo1", NORMAL_PRIO);
create_kernel_task(NULL, foo, "Hello from foo1", NORMAL_PRIO);
create_kernel_task(NULL, foo, "Hello from foo2", NORMAL_PRIO);
//create_kernel_task(NULL, join_test, NULL, NORMAL_PRIO);
//create_kernel_task(NULL, producer, , NORMAL_PRIO);
//create_kernel_task(NULL, consumer, NULL, NORMAL_PRIO);

View file

@ -507,6 +507,39 @@ hack:
jmp 0x00 : 0xDEADBEAF
ret
global sw_switch_context
; sw_switch_context(old_stack_ptr, new_stack_ptr)
; Software context switch: saves the current task's full register state on
; its own kernel stack, stores the resulting stack pointer through the first
; argument, then loads the next task's saved stack pointer from the second
; argument and restores its state. The saved frame mimics an interrupt frame
; (segments, pusha block, int_no/error markers, EIP/CS/EFLAGS), so
; sw_rollback can resume a task regardless of how its frame was built.
sw_switch_context:
;pushf
; Build the iret portion of the frame: CS = kernel code selector 0x08,
; EIP = our own return address (at [esp+4] after the first push).
push DWORD 0x8
push DWORD [esp+4]
; Dummy marker values filling the int_no/error slots of the frame so the
; layout matches the interrupt stubs' frame (presumably struct state's
; int_no/error fields -- verify against the C definition).
push DWORD 0
push DWORD 0xc0edbabe
; General-purpose registers, then data segment registers.
pusha
push ds
push es
push fs
push gs
; Capture EFLAGS and patch it into the EFLAGS slot of the iret frame.
; 64 bytes were pushed above (4 dwords + pusha 32 + 4 segment dwords), so
; [esp+64] is the caller's original return-address slot -- now reused as the
; EFLAGS slot (its old value was already copied into the EIP slot).
pushf
pop eax
mov [esp+64], eax
; *old_stack_ptr = current esp (first argument, now at [esp+68]).
mov ecx, [esp+68]
mov [ecx], esp
; esp = *new_stack_ptr (second argument, now at [esp+72]).
mov ecx, [esp+72]
mov esp, [ecx]
; Fall through: unwind the new task's saved frame.
sw_rollback:
pop gs
pop fs
pop es
pop ds
popa
; Skip the int_no/error markers; iret then pops EIP, CS and EFLAGS.
add esp, 8
iret
; 32: IRQ0
irq0:
; irq0 - irq15 are registered as "Interrupt Gate"
@ -780,7 +813,7 @@ common_stub:
pop gs
pop fs
pop es
pop gs
pop ds
popa
add esp, 8

View file

@ -27,7 +27,11 @@
#include <asm/page.h>
gdt_ptr_t gp;
#ifdef SW_TASK_SWITCH
/* With software task switching only one TSS per core is kept (presumably
 * just for the privilege-level stack switch, not per task -- confirm). */
static tss_t task_state_segments[MAX_CORES] __attribute__ ((aligned (PAGE_SIZE)));
#else
/* Hardware task switching keeps one TSS per task. */
static tss_t task_state_segments[MAX_TASKS] __attribute__ ((aligned (PAGE_SIZE)));
#endif
/* Per-task kernel stacks, pre-poisoned with 0xCD so unused/overrun stack
 * regions are recognizable in a debugger. */
static unsigned char kstacks[MAX_TASKS][KERNEL_STACK_SIZE] __attribute__ ((aligned (PAGE_SIZE))) = {[0 ... MAX_TASKS-1][0 ... KERNEL_STACK_SIZE-1] = 0xCD};
/* Boot stack pointer: top of task 0's kernel stack. */
uint32_t default_stack_pointer = (uint32_t) kstacks[0] + KERNEL_STACK_SIZE - sizeof(size_t);
// currently, our kernel has full access to the ioports
@ -71,6 +75,7 @@ int register_task(task_t* task) {
int arch_fork(task_t* task)
{
#ifndef SW_TASK_SWITCH
uint16_t cs = 0x08;
uint16_t ds = 0x10;
uint32_t id;
@ -120,6 +125,7 @@ int arch_fork(task_t* task)
asm volatile ("pushf; pop %%eax" : "=a"(task_state_segments[id].eflags));
// This will be the entry point for the new task.
asm volatile ("call read_eip" : "=a"(task_state_segments[id].eip));
#endif
return 0;
}
@ -130,10 +136,40 @@ int create_default_frame(task_t* task, internal_entry_point_t ep, void* arg)
uint16_t ds = 0x10;
uint32_t id;
#ifdef SW_TASK_SWITCH
uint32_t *stack;
struct state *stptr;
uint32_t short_state_size = sizeof(struct state)/sizeof(uint32_t) -2;
#endif
if (BUILTIN_EXPECT(!task, 0))
return -EINVAL;
id = task->id;
#ifdef SW_TASK_SWITCH
memset(kstacks[id], 0xCD, KERNEL_STACK_SIZE);
stack = kstacks[id] +KERNEL_STACK_SIZE -sizeof(uint32_t);
*stack-- = 0xDEADBEEF;
*stack-- = arg;
*stack = leave_kernel_task;
stack -= short_state_size;
stptr = stack;
memset(stptr, 0x00, short_state_size*sizeof(uint32_t));
stptr->gs = stptr->fs = stptr->es = stptr->ds = ds;
stptr->esp = stack +short_state_size;
stptr->int_no = 0xB16B00B5;
stptr->error = 0xC03DB4B3;
stptr->eip = ep;
stptr->cs = cs;
stptr->eflags = 0x1002;
//stptr->ss = ds;
//stptr->useresp = kstacks[id] +KERNEL_STACK_SIZE - 3*sizeof(uint32_t);
task->stack = stack;
#else
/* reset buffers */
memset(task_state_segments+id, 0x00, sizeof(tss_t));
memset(kstacks[id], 0xCD, KERNEL_STACK_SIZE);
@ -161,9 +197,48 @@ int create_default_frame(task_t* task, internal_entry_point_t ep, void* arg)
/* setup for the kernel stack frame */
task_state_segments[id].ss0 = 0x10;
task_state_segments[id].esp0 = (uint32_t) kstacks[id] + KERNEL_STACK_SIZE - sizeof(size_t);
#endif
return 0;
}
#ifdef SW_TASK_SWITCH
/** @brief Initialize a default TSS for core @p id (software-switching mode).
 *
 * In SW_TASK_SWITCH mode the TSS entries are per core rather than per task.
 * This installs the kernel code/data selectors, a default EFLAGS image and
 * a stack pointer at the top of the matching kernel stack, plus the ring0
 * stack (ss0/esp0) used on privilege-level transitions.
 *
 * @return 0 on success
 */
int create_default_tss(int id)
{
	const uint16_t code_sel = 0x08;	/* kernel code segment selector */
	const uint16_t data_sel = 0x10;	/* kernel data segment selector */
	tss_t* tss = task_state_segments + id;

	/* reset buffers */
	memset(tss, 0x00, sizeof(tss_t));

	/* set default values of all registers */
	tss->cs = code_sel;
	tss->ss = data_sel;
	tss->ds = data_sel;
	tss->fs = data_sel;
	tss->gs = data_sel;
	tss->es = data_sel;
	tss->eflags = 0x1002; // 0x1202;
	//tss->cr3 = (uint32_t) (virt_to_phys((size_t)task->pgd));
	//tss->eip = (uint32_t) ep;
	tss->esp = (uint32_t) kstacks[id] + KERNEL_STACK_SIZE - sizeof(size_t);

	/* build default stack frame */
	*((size_t*) tss->esp) = 0xDEADBEAF; /* dead-end */
	/*
	tss->ebp = tss->esp;
	tss->esp -= sizeof(size_t);
	*((size_t*) tss->esp) = (size_t) arg;
	tss->esp -= sizeof(size_t);
	*((size_t*) tss->esp) = (size_t) leave_kernel_task;
	*/

	/* setup for the kernel stack frame */
	tss->ss0 = 0x10;
	tss->esp0 = (uint32_t) kstacks[id] + KERNEL_STACK_SIZE - sizeof(size_t);
	return 0;
}
#endif
/* Setup a descriptor in the Global Descriptor Table */
static void gdt_set_gate(int num, unsigned long base, unsigned long limit,
@ -203,7 +278,11 @@ void gdt_install(void)
{
unsigned int i;
#ifdef SW_TASK_SWITCH
memset(task_state_segments, 0x00, MAX_CORES*sizeof(tss_t));
#else
memset(task_state_segments, 0x00, MAX_TASKS*sizeof(tss_t));
#endif
/* Setup the GDT pointer and limit */
gp.limit = (sizeof(gdt_entry_t) * GDT_ENTRIES) - 1;
@ -247,7 +326,12 @@ void gdt_install(void)
/*
* Create TSS for each task at ring0 (we use these segments for task switching)
*/
#ifdef SW_TASK_SWITCH
for(i=0; i<MAX_CORES; i++) {
create_default_tss(i);
#else
for(i=0; i<MAX_TASKS; i++) {
#endif
gdt_set_gate(5+i, (unsigned long) (task_state_segments+i), sizeof(tss_t)-1,
GDT_FLAG_PRESENT | GDT_FLAG_TSS | GDT_FLAG_RING0,
GDT_FLAG_32_BIT);

View file

@ -66,6 +66,9 @@ struct page_dir;
/** @brief The task_t structure */
typedef struct task {
#ifdef SW_TASK_SWITCH
uint32_t stack;
#endif
/// Task id = position in the task table
tid_t id;
/// Task status (INVALID, READY, RUNNING, ...)

View file

@ -46,9 +46,15 @@
*
* A task's id will be its position in this array.
*/
#ifdef SW_TASK_SWITCH
/* Static task table; a task's id is its index. The SW_TASK_SWITCH variant
 * carries one extra leading 0 per initializer for the saved stack pointer
 * field added to task_t. Task 0 is the idle task; all others start INVALID. */
static task_t task_table[MAX_TASKS] = { \
[0] = {0, 0, TASK_IDLE, 0, 0, 0, NULL, NULL, 0, ATOMIC_INIT(0), SPINLOCK_INIT, NULL, SPINLOCK_INIT, NULL, NULL, 0, 0, 0, 0}, \
[1 ... MAX_TASKS-1] = {0, 0, TASK_INVALID, 0, 0, 0, NULL, NULL, 0, ATOMIC_INIT(0), SPINLOCK_INIT, NULL, SPINLOCK_INIT, NULL, NULL, 0, 0, 0, 0}};
#else
/* Hardware-switching variant: task_t without the stack-pointer field. */
static task_t task_table[MAX_TASKS] = { \
[0] = {0, TASK_IDLE, 0, 0, 0, NULL, NULL, 0, ATOMIC_INIT(0), SPINLOCK_INIT, NULL, SPINLOCK_INIT, NULL, NULL, 0, 0, 0, 0}, \
[1 ... MAX_TASKS-1] = {0, TASK_INVALID, 0, 0, 0, NULL, NULL, 0, ATOMIC_INIT(0), SPINLOCK_INIT, NULL, SPINLOCK_INIT, NULL, NULL, 0, 0, 0, 0}};
#endif
static spinlock_irqsave_t table_lock = SPINLOCK_IRQSAVE_INIT;
#if MAX_CORES > 1
@ -1374,14 +1380,21 @@ get_task_out:
//kprintf("schedule from %u to %u with prio %u on core %u\n",
// orig_task->id, curr_task->id, (uint32_t)curr_task->prio, CORE_ID);
#ifndef SW_TASK_SWITCH
switch_task(curr_task->id);
#else
write_cr3(virt_to_phys((size_t)curr_task->pgd));
#endif
finish_task_switch(0);
#ifdef SW_TASK_SWITCH
sw_switch_context(&orig_task->stack, &curr_task->stack);
#endif
}
}
/** @brief Voluntarily trigger a task switch with interrupts disabled.
 *
 * Wraps scheduler() in an irq_nested_disable()/irq_nested_enable() pair so
 * the switch cannot itself be interrupted.
 *
 * Fix: scheduler() was called twice back-to-back; the second call would
 * immediately reschedule again as soon as this task was switched back in.
 * One invocation per reschedule() is sufficient.
 */
void reschedule(void)
{
	uint32_t flags = irq_nested_disable();
	scheduler();
	irq_nested_enable(flags);
}