revise software multitasking
=> remove hardware multitasking
parent b39a84e07f
commit b9b962ea73
12 changed files with 168 additions and 256 deletions
@@ -1,4 +1,4 @@
TOPDIR := $(shell pwd)
TOPDIR = $(shell pwd)
ARCH = x86
NAME = metalsvm
LWIPDIRS = lwip/src/arch lwip/src/api lwip/src/core lwip/src/core/ipv4 lwip/src/netif
@@ -31,14 +31,15 @@ READELF_FOR_TARGET = $(CROSSCOMPREFIX)readelf
MAKE = make
RM = rm -rf
NASM = nasm
QEMU = qemu-system-i386
EMU = qemu
GDB = gdb

NASMFLAGS = -felf32 -g
NASMFLAGS = -felf32 -g -i$(TOPDIR)/include/metalsvm/
INCLUDE = -I$(TOPDIR)/include -I$(TOPDIR)/arch/$(ARCH)/include -I$(TOPDIR)/lwip/src/include -I$(TOPDIR)/lwip/src/include/ipv4 -I$(TOPDIR)/drivers
# Compiler options for final code
CFLAGS = -g -m32 -march=i586 -Wall -O2 -fno-builtin -fstrength-reduce -fomit-frame-pointer -finline-functions -nostdinc $(INCLUDE) $(STACKPROT)
# Compiler options for debugging
# Compiler options for debuging
#CFLAGS = -g -O -m32 -march=i586 -Wall -fno-builtin -DWITH_FRAME_POINTER -nostdinc $(INCLUDE) $(STACKPROT)
ARFLAGS = rsv
LDFLAGS = -T link.ld -z max-page-size=4096 --defsym __BUILD_DATE=$(shell date +'%Y%m%d') --defsym __BUILD_TIME=$(shell date +'%H%M%S')
@@ -69,6 +70,7 @@ newlib:
RANLIB_FOR_TARGET=$(RANLIB_FOR_TARGET) \
STRIP_FOR_TARGET=$(STRIP_FOR_TARGET) \
READELF_FOR_TARGET=$(READELF_FOR_TARGET) -C newlib

tools:
$(MAKE) -C tools

@@ -80,10 +82,10 @@ $(NAME).elf:
$Q$(OBJCOPY_FOR_TARGET) $(STRIP_DEBUG) $(NAME).elf

qemu: newlib tools $(NAME).elf
qemu -monitor stdio -smp 2 -net nic,model=rtl8139 -net user,hostfwd=tcp::12345-:4711 -net dump -kernel metalsvm.elf -initrd tools/initrd.img
$(QEMU) -monitor stdio -smp 2 -net nic,model=rtl8139 -net user,hostfwd=tcp::12345-:4711 -net dump -kernel metalsvm.elf -initrd tools/initrd.img

qemudbg: newlib tools $(NAME).elf
qemu -S -s -smp 2 -net nic,model=rtl8139 -net user,hostfwd=tcp::12345-:4711 -net dump -kernel metalsvm.elf -initrd tools/initrd.img
$(QEMU) -s -S -smp 2 -net nic,model=rtl8139 -net user,hostfwd=tcp::12345-:4711 -net dump -kernel metalsvm.elf -initrd tools/initrd.img

gdb: $(NAME).elf
make qemudbg > /dev/null &
@@ -105,7 +107,7 @@ veryclean: clean
@echo [CC] $@
$Q$(CC_FOR_TARGET) -c -D__KERNEL__ $(CFLAGS) -o $@ $<
@echo [DEP] $*.dep
$Q$(CPP_FOR_TARGET) -MF $*.dep -MT $*.o -MM $(CFLAGS) $<
$Q$(CPP_FOR_TARGET) -MF $*.dep -MT $*.o -MM -D__KERNEL__ $(CFLAGS) $<

%.o : %.asm

14  apps/tests.c
@@ -432,11 +432,7 @@ static int measure_ctx_switch(void* arg)
uint64_t timeslice = freq / TIMER_FREQ;

kprintf("ID: %d, ", id);
#ifdef SW_TASK_SWITCH
kprintf("Measuring SW task switch.\n");
#else
kprintf("Measuring HW task switch.\n");
#endif
kprintf("Measuring SW task switching.\n");

for (i=0; i < REPS && stop == 0; i++) {
while(id == sid && stop == 0) {
@@ -487,9 +483,9 @@ int test_init(void)
//sem_init(&consuming, 0);
//mailbox_int32_init(&mbox);

create_kernel_task(NULL, measure_ctx_switch, (int)0, NORMAL_PRIO);
create_kernel_task(NULL, measure_ctx_switch, (int)1, NORMAL_PRIO);
//create_kernel_task(NULL, foo, "Hello from foo1", NORMAL_PRIO);
//create_kernel_task(NULL, measure_ctx_switch, (int)0, NORMAL_PRIO);
//create_kernel_task(NULL, measure_ctx_switch, (int)1, NORMAL_PRIO);
create_kernel_task(NULL, foo, "Hello from foo1", NORMAL_PRIO);
//create_kernel_task(NULL, foo, "Hello from foo2", NORMAL_PRIO);
//create_kernel_task(NULL, join_test, NULL, NORMAL_PRIO);
//create_kernel_task(NULL, producer, , NORMAL_PRIO);
@@ -502,7 +498,7 @@ int test_init(void)
//create_kernel_task(NULL, laplace, NULL, NORMAL_PRIO);
//create_kernel_task(NULL, jacobi, NULL, NORMAL_PRIO);
//create_user_task(NULL, "/bin/hello", argv);
//create_user_task(NULL, "/bin/tests", argv);
create_user_task(NULL, "/bin/tests", argv);
//create_user_task(NULL, "/bin/jacobi", argv);
//create_user_task(NULL, "/bin/mshell", argv);
//create_user_task(NULL, "/bin/jacobi", argv);

@@ -55,6 +55,13 @@ int arch_fork(task_t* task);
*/
void switch_task(uint32_t id);

/**
* @brief Switch to current task
*
* @param stack Pointer to the old stack pointer
*/
void switch_context(size_t** stack);

/** @brief Setup a default frame for a new task
*
* @param task Pointer to the task structure
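Note: the switch_context() declared here is the software replacement for the removed hardware task gate. A minimal sketch of how it is meant to be driven from C, assuming the scheduler() signature introduced later in this commit (it returns the address of the outgoing task's stack-pointer field, or NULL when no switch is needed); irq_nested_disable()/irq_nested_enable() are existing MetalSVM helpers, and the pattern mirrors the reschedule() change at the end of this diff:

/* sketch only: driving the stack-based context switch from C
 * (assumes the usual MetalSVM kernel headers for the prototypes) */
void reschedule_sketch(void)
{
	size_t** stack;
	uint32_t flags = irq_nested_disable();  /* no interrupts while a task is picked */

	if ((stack = scheduler()))              /* non-NULL means: &outgoing_task->stack */
		switch_context(stack);          /* store old ESP there, iret into the new task */

	irq_nested_enable(flags);
}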
@@ -21,6 +21,8 @@
; perhaps setting up the GDT and segments. Please note that interrupts
; are disabled at this point: More on interrupts later!

%include "config.inc"

[BITS 32]
; We use a special name to map this section at the begin of our kernel
; => Multiboot needs its magic number at the begin of the kernel
@@ -496,51 +498,6 @@ global apic_lint1
global apic_error
global apic_svr

global switch_task
switch_task:
mov eax, [esp+4]
add ax, WORD 5
mov bx, WORD 8
mul bx
mov [hack+5], ax
hack:
jmp 0x00 : 0xDEADBEAF
ret

; This procedure is used by scheduler() to switch tasks.
; It is the software-equivalent to the hw-procedure switch_task from above.
; Call it in C with the following arguments:
; sw_switch_context(&old_tasks_stack_pointer, &new_tasks_stack_pointer)
global sw_switch_context
sw_switch_context:
; The stack layout looks like this:
; [new stack pointer]
; [old stack pointer]
;pushf ; [this procedure's return address] overwritten by: EFLAGS (*1)
push DWORD 0x8 ; CS
push DWORD [esp+4] ; EIP
push DWORD 0 ; Interrupt number
push DWORD 0xc0edbabe ; Error code
pusha ; Registers...
; ---- This will be popped off by iret later.

pushf
pop eax
mov [esp+48], eax ; Move EFLAGS to position (*1) by overwriting
; the return address of sw_switch_context()

mov ecx, [esp+52]
mov [ecx], esp ; Save stack position in old task structure
mov ecx, [esp+56]
mov esp, [ecx] ; Load new stack

sw_rollback:
popa

add esp, 8
iret

; 32: IRQ0
irq0:
; irq0 - irq15 are registered as "Interrupt Gate"
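The removed switch_task was the hardware-multitasking path: it patched the selector of a far jump at run time (the "hack" label) so that the jump landed on the task's TSS descriptor in the GDT. The selector arithmetic matches what register_task() still does before ltr, namely (id + 5) << 3, since the TSS descriptors start at GDT entry 5. A hedged C illustration of that arithmetic (the helper name is made up for this note):

#include <stdint.h>

/* Illustration only: "add ax, WORD 5; mov bx, WORD 8; mul bx" in the removed
 * asm computes (id + 5) * 8, i.e. (id + 5) << 3, the selector of GDT entry
 * 5 + id with RPL 0. gdt_install() places the TSS descriptors at entry 5. */
static uint16_t tss_selector(uint32_t id)
{
	return (uint16_t)((id + 5) << 3);
}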
@@ -798,7 +755,31 @@ apic_svr:
jmp common_stub

extern irq_handler
extern get_current_stack
extern finish_task_switch
extern apic_cpu_id
extern task_state_segments

global switch_context
ALIGN 4
switch_context:
; create on the stack a pseudo interrupt
; afterwards, we switch to the task with iret
mov eax, [esp+4] ; on the stack is already the address to store the old esp
pushf ; EFLAGS
push DWORD 0x8 ; CS
push DWORD rollback ; EIP
push DWORD 0 ; Interrupt number
push DWORD 0xc0edbabe ; Error code
pusha ; Registers...

jmp common_switch

ALIGN 4
rollback:
ret

ALIGN 4
common_stub:
pusha
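switch_context builds a pseudo interrupt frame so that the shared iret path can later resume the task as if it had been interrupted. As a reading aid, a hedged sketch of that frame as a C struct, lowest address first (the real struct state definition is not part of this diff, so take the exact field list as an assumption; only the fields referenced elsewhere in the commit are certain):

#include <stdint.h>

/* Assumed layout, i.e. the view of the stack right after "pusha":
 * eight general-purpose registers, the two dummy slots pushed above
 * (interrupt number / error code, filled with magic markers for pseudo
 * frames), then the part iret consumes. useresp/ss only exist for
 * inter-privilege returns; they are the "2*sizeof(size_t)" that
 * create_default_frame subtracts. */
struct state_sketch {
	uint32_t edi, esi, ebp, esp, ebx, edx, ecx, eax; /* pusha block */
	uint32_t int_no, error;                          /* dummy / magic slots */
	uint32_t eip, cs, eflags;                        /* popped by iret */
	uint32_t useresp, ss;                            /* ring-change only */
};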
@@ -807,6 +788,31 @@ common_stub:
call irq_handler
add esp, 4

cmp eax, 0
je no_context_switch

common_switch:
mov [eax], esp ; store old esp
call get_current_stack ; get new esp
xchg eax, esp

; determine TSS
%if MAX_CORES > 1
call apic_cpu_id
%else
xor eax, eax
%endif
mov ecx, DWORD 0x68
mul ecx
add eax, task_state_segments
add eax, DWORD 4
; set esp0 in TSS
mov [eax], esp

; call cleanup code
call finish_task_switch

no_context_switch:
popa
add esp, 8
iret

@@ -27,11 +27,7 @@
#include <asm/page.h>

gdt_ptr_t gp;
#ifdef SW_TASK_SWITCH
static tss_t task_state_segments[MAX_CORES] __attribute__ ((aligned (PAGE_SIZE)));
#else
static tss_t task_state_segments[MAX_TASKS] __attribute__ ((aligned (PAGE_SIZE)));
#endif
tss_t task_state_segments[MAX_CORES] __attribute__ ((aligned (PAGE_SIZE)));
static unsigned char kstacks[MAX_TASKS][KERNEL_STACK_SIZE] __attribute__ ((aligned (PAGE_SIZE))) = {[0 ... MAX_TASKS-1][0 ... KERNEL_STACK_SIZE-1] = 0xCD};
uint32_t default_stack_pointer = (uint32_t) kstacks[0] + KERNEL_STACK_SIZE - sizeof(size_t);
// currently, our kernel has full access to the ioports
@@ -49,6 +45,15 @@ extern void gdt_flush(void);
*/
extern void tss_switch(uint32_t id);

size_t* get_current_stack(void)
{
task_t* curr_task = per_core(current_task);

write_cr3(virt_to_phys((size_t)curr_task->pgd));

return curr_task->stack;
}

size_t get_stack(uint32_t id)
{
if (BUILTIN_EXPECT(id >= MAX_TASKS, 0))
@@ -58,27 +63,18 @@ size_t get_stack(uint32_t id)

int register_task(task_t* task) {
uint16_t sel;
uint32_t id = task->id;

if (BUILTIN_EXPECT(!task, 0))
return -EINVAL;

sel = (task->id+5) << 3;
sel = (CORE_ID+5) << 3;
asm volatile ("mov %0, %%ax; ltr %%ax" : : "ir"(sel) : "%eax");

// initialize the static elements of a TSS
task_state_segments[id].cr3 = (uint32_t) (task->pgd);
task_state_segments[id].ss0 = 0x10;

return 0;
}

int arch_fork(task_t* task)
{
#ifndef SW_TASK_SWITCH
uint16_t cs = 0x08;
uint16_t ds = 0x10;
uint32_t id;
uint32_t id, esp;
struct state* state;
task_t* curr_task = per_core(current_task);

if (BUILTIN_EXPECT(!task, 0))
@@ -88,44 +84,37 @@ int arch_fork(task_t* task)
// copy kernel stack of the current task
memcpy(kstacks[id], kstacks[curr_task->id], KERNEL_STACK_SIZE);

// reset TSS
memset(task_state_segments+id, 0x00, sizeof(tss_t));
asm volatile ("mov %%esp, %0" : "=r"(esp));
esp -= (uint32_t) kstacks[curr_task->id];
esp += (uint32_t) kstacks[id];

// set default values of all registers
task_state_segments[id].cs = cs;
task_state_segments[id].ss = ds;
task_state_segments[id].ds = ds;
task_state_segments[id].fs = ds;
task_state_segments[id].gs = ds;
task_state_segments[id].es = ds;
task_state_segments[id].cr3 = (uint32_t) (virt_to_phys((size_t)task->pgd));
task_state_segments[id].ss0 = ds;
task_state_segments[id].esp0 = (uint32_t) kstacks[id] + KERNEL_STACK_SIZE - sizeof(size_t);

// save curret task context
asm volatile("mov %%esp, %0" : "=r"(task_state_segments[id].esp));
task_state_segments[id].esp -= (uint32_t) kstacks[curr_task->id];
task_state_segments[id].esp += (uint32_t) kstacks[id];
state = (struct state*) (esp - sizeof(struct state) + 2*sizeof(size_t));

asm volatile ("pusha");
asm volatile ("pop %0" : "=r"(task_state_segments[id].edi));
asm volatile ("pop %0" : "=r"(task_state_segments[id].esi));
asm volatile ("pop %0" : "=r"(task_state_segments[id].ebp));
asm volatile ("pop %0" : "=r"(state->edi));
asm volatile ("pop %0" : "=r"(state->esi));
asm volatile ("pop %0" : "=r"(state->ebp));
#ifdef WITH_FRAME_POINTER
task_state_segments[id].ebp -= (uint32_t) kstacks[curr_task->id];
task_state_segments[id].ebp += (uint32_t) kstacks[id];
state->ebp -= (uint32_t) kstacks[curr_task->id];
state->ebp += (uint32_t) kstacks[id];
#endif
asm volatile ("add $4, %%esp" ::: "%esp");
asm volatile ("pop %0" : "=r"(task_state_segments[id].ebx));
asm volatile ("pop %0" : "=r"(task_state_segments[id].edx));
asm volatile ("pop %0" : "=r"(task_state_segments[id].ecx));
asm volatile ("pop %0" : "=r"(task_state_segments[id].eax));
state->esp = (uint32_t) state;
task->stack = (size_t*) state;
asm volatile ("pop %0" : "=r"(state->ebx));
asm volatile ("pop %0" : "=r"(state->edx));
asm volatile ("pop %0" : "=r"(state->ecx));
asm volatile ("pop %0" : "=r"(state->eax));

state->int_no = 0xB16B00B5;
state->error = 0xC03DB4B3;
state->cs = cs;
// store the current EFLAGS
asm volatile ("pushf; pop %%eax" : "=a"(task_state_segments[id].eflags));
asm volatile ("pushf; pop %%eax" : "=a"(state->eflags));
// enable interrupts
state->eflags |= (1 << 9);
// This will be the entry point for the new task.
asm volatile ("call read_eip" : "=a"(task_state_segments[id].eip));
#endif
asm volatile ("call read_eip" : "=a"(state->eip));

return 0;
}
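The reworked arch_fork() no longer fills in a TSS; it copies the parent's kernel stack and builds a struct state on the child's copy. Saved pointers into the parent's stack (ESP, and EBP when WITH_FRAME_POINTER is set) only have to be rebased by the distance between the two stack buffers. A hedged sketch of that rebasing step (the helper is invented for this note, the arithmetic is taken from the hunk above):

#include <stdint.h>

/* Illustration only: translate a pointer into the parent's kernel stack to
 * the same offset inside the child's copy, as arch_fork() does for esp. */
static inline uint32_t rebase_into_child(uint32_t ptr,
                                         uint32_t parent_stack_base,
                                         uint32_t child_stack_base)
{
	return ptr - parent_stack_base + child_stack_base;
}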
@@ -133,132 +122,56 @@ int arch_fork(task_t* task)
int create_default_frame(task_t* task, internal_entry_point_t ep, void* arg)
{
uint16_t cs = 0x08;
uint16_t ds = 0x10;
uint32_t id;

#ifdef SW_TASK_SWITCH
uint32_t *stack;
struct state *stptr;
uint32_t short_state_size = sizeof(struct state)/sizeof(uint32_t) -2;
#endif

if (BUILTIN_EXPECT(!task, 0))
return -EINVAL;
id = task->id;

#ifdef SW_TASK_SWITCH
memset(kstacks[id], 0xCD, KERNEL_STACK_SIZE);

/* The difference between setting up a task for SW-task-switching
* and not for HW-task-switching is setting up a stack and not a TSS.
* This is the stack which will be activated and popped off for iret later.
*/
stack = kstacks[id] +KERNEL_STACK_SIZE -sizeof(uint32_t);
stack = (uint32_t*) (kstacks[id] + KERNEL_STACK_SIZE - sizeof(uint32_t));

/* The next three things on the stack are a marker for debugging purposes, ... */
*stack-- = 0xDEADBEEF;
/* the first-function-to-be-called's arguments, ... */
*stack-- = arg;
*stack-- = (size_t) arg;
/* and the "caller" we shall return to.
* This procedure cleans the task after exit. */
*stack = leave_kernel_task;
*stack = (size_t) leave_kernel_task;

/* Next bunch on the stack is the initial register state.
* The stack must look like the stack of a task which was
* scheduled away previously. */

/* short_state_size was introduced because the convenient "struct state"
* is used for filling the stack with initial values. But the problem is that
* "iret" will not remove the last two entries from the stack, since we're
* "returning" from kernel space to kernel space. Therefore it is shortened
* by its last two entries. */
stack -= short_state_size;
stack = (uint32_t*) ((size_t) stack - sizeof(struct state) + 2*sizeof(size_t));

stptr = stack;
memset(stptr, 0x00, short_state_size*sizeof(uint32_t));
stptr->esp = stack +short_state_size;
stptr = (struct state *) stack;
memset(stptr, 0x00, sizeof(struct state) - 2*sizeof(size_t));
stptr->esp = (size_t)stack + sizeof(struct state) - 2*sizeof(size_t);
stptr->int_no = 0xB16B00B5;
stptr->error = 0xC03DB4B3;

/* The instruction pointer shall be set on the first function to be called
* after IRETing */
stptr->eip = ep;
stptr->eip = (uint32_t)ep;
stptr->cs = cs;
stptr->eflags = 0x1002;
stptr->eflags = 0x1202;
// the creation of a kernel tasks didn't change the IOPL level
// => useresp & ss is not required

/* Set the task's stack pointer entry to the stack we have crafted right now.
* This is the pointer which will be used by sw_switch_task(old_task, new_task) later.*/
task->stack = stack;
#else
/* reset buffers */
memset(task_state_segments+id, 0x00, sizeof(tss_t));
memset(kstacks[id], 0xCD, KERNEL_STACK_SIZE);

/* set default values of all registers */
task_state_segments[id].cs = cs;
task_state_segments[id].ss = ds;
task_state_segments[id].ds = ds;
task_state_segments[id].fs = ds;
task_state_segments[id].gs = ds;
task_state_segments[id].es = ds;
task_state_segments[id].eflags = 0x1002; // 0x1202;
task_state_segments[id].cr3 = (uint32_t) (virt_to_phys((size_t)task->pgd));
task_state_segments[id].eip = (uint32_t) ep;
task_state_segments[id].esp = (uint32_t) kstacks[id] + KERNEL_STACK_SIZE - sizeof(size_t);

/* build default stack frame */
*((size_t*)task_state_segments[id].esp) = 0xDEADBEAF; /* dead-end */
task_state_segments[id].ebp = task_state_segments[id].esp;
task_state_segments[id].esp -= sizeof(size_t);
*((size_t*)task_state_segments[id].esp) = (size_t) arg;
task_state_segments[id].esp -= sizeof(size_t);
*((size_t*)task_state_segments[id].esp) = (size_t) leave_kernel_task;

/* setup for the kernel stack frame */
task_state_segments[id].ss0 = 0x10;
task_state_segments[id].esp0 = (uint32_t) kstacks[id] + KERNEL_STACK_SIZE - sizeof(size_t);
#endif
return 0;
}

#ifdef SW_TASK_SWITCH
int create_default_tss(int id)
{
uint16_t cs = 0x08;
uint16_t ds = 0x10;

/* reset buffers */
memset(task_state_segments+id, 0x00, sizeof(tss_t));

/* set default values of all registers */
task_state_segments[id].cs = cs;
task_state_segments[id].ss = ds;
task_state_segments[id].ds = ds;
task_state_segments[id].fs = ds;
task_state_segments[id].gs = ds;
task_state_segments[id].es = ds;
task_state_segments[id].eflags = 0x1002; // 0x1202;
//task_state_segments[id].cr3 = (uint32_t) (virt_to_phys((size_t)task->pgd));
//task_state_segments[id].eip = (uint32_t) ep;
task_state_segments[id].esp = (uint32_t) kstacks[id] + KERNEL_STACK_SIZE - sizeof(size_t);

/* build default stack frame */
*((size_t*)task_state_segments[id].esp) = 0xDEADBEAF; /* dead-end */
/*
task_state_segments[id].ebp = task_state_segments[id].esp;
task_state_segments[id].esp -= sizeof(size_t);
*((size_t*)task_state_segments[id].esp) = (size_t) arg;
task_state_segments[id].esp -= sizeof(size_t);
*((size_t*)task_state_segments[id].esp) = (size_t) leave_kernel_task;
*/

/* setup for the kernel stack frame */
task_state_segments[id].ss0 = 0x10;
task_state_segments[id].esp0 = (uint32_t) kstacks[id] + KERNEL_STACK_SIZE - sizeof(size_t);
/* Set the task's stack pointer entry to the stack we have crafted right now. */
task->stack = (size_t*)stack;

return 0;
}
#endif

/* Setup a descriptor in the Global Descriptor Table */
static void gdt_set_gate(int num, unsigned long base, unsigned long limit,
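With SW_TASK_SWITCH gone, create_default_frame() always prepares the software-switch stack instead of a TSS. A hedged sketch of the resulting initial kernel stack, written as a C comment (offsets follow the arithmetic in the hunk above; the exact size of struct state is the assumption noted earlier):

/* Sketch of the initial kernel stack built by create_default_frame(),
 * from the top of kstacks[id] downwards:
 *
 *   top -  4 : 0xDEADBEEF              debug marker / dead end
 *   top -  8 : arg                     argument for the entry point
 *   top - 12 : leave_kernel_task       acts as ep's return address
 *   below    : truncated struct state  (without useresp/ss), with
 *              eip = ep, cs = 0x08, eflags = 0x1202,
 *              esp pointing back at the leave_kernel_task slot
 *
 * task->stack points at the struct state, so the first switch_context()
 * into this task pops the frame, irets to ep with arg as its cdecl
 * argument, and a plain return from ep lands in leave_kernel_task. */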
@@ -298,11 +211,7 @@ void gdt_install(void)
{
unsigned int i;

#ifdef SW_TASK_SWITCH
memset(task_state_segments, 0x00, MAX_CORES*sizeof(tss_t));
#else
memset(task_state_segments, 0x00, MAX_TASKS*sizeof(tss_t));
#endif

/* Setup the GDT pointer and limit */
gp.limit = (sizeof(gdt_entry_t) * GDT_ENTRIES) - 1;
@@ -346,15 +255,14 @@ void gdt_install(void)
/*
* Create TSS for each task at ring0 (we use these segments for task switching)
*/
#ifdef SW_TASK_SWITCH
for(i=0; i<MAX_CORES; i++) {
create_default_tss(i);
#else
for(i=0; i<MAX_TASKS; i++) {
#endif
/* set default values */
task_state_segments[i].eflags = 0x1202;
task_state_segments[i].ss0 = 0x10; // data segment
task_state_segments[i].esp0 = 0xDEADBEEF; // invalid pseudo address

gdt_set_gate(5+i, (unsigned long) (task_state_segments+i), sizeof(tss_t)-1,
GDT_FLAG_PRESENT | GDT_FLAG_TSS | GDT_FLAG_RING0,
GDT_FLAG_32_BIT);
GDT_FLAG_PRESENT | GDT_FLAG_TSS | GDT_FLAG_RING0, GDT_FLAG_32_BIT);
}

/* Flush out the old GDT and install the new changes! */

@@ -29,6 +29,7 @@
#include <metalsvm/string.h>
#include <metalsvm/tasks.h>
#include <metalsvm/errno.h>
#include <metalsvm/page.h>
#include <asm/irq.h>
#include <asm/idt.h>
#include <asm/isrs.h>
@@ -224,13 +225,15 @@ int irq_init(void)
* controller (an IRQ from 8 to 15) gets an interrupt, you need to
* acknowledge the interrupt at BOTH controllers, otherwise, you
* only send an EOI command to the first controller. If you don't send
* an EOI, it won't raise any more IRQs.\n
* \n
* an EOI, it won't raise any more IRQs.
*
* Note: If we enabled the APIC, we also disabled the PIC. Afterwards,
* we get no interrupts between 0 and 15.
*/
void irq_handler(struct state *s)
size_t** irq_handler(struct state *s)
{
size_t** ret = NULL;

/* This is a blank function pointer */
void (*handler) (struct state * s);

@@ -276,7 +279,9 @@ void irq_handler(struct state *s)
leave_handler:
// timer interrupt?
if ((s->int_no == 32) || (s->int_no == 123))
scheduler(); // switch to a new task
ret = scheduler(); // switch to a new task
else if ((s->int_no >= 32) && (get_highest_priority() > per_core(current_task)->prio))
scheduler();
ret = scheduler();

return ret;
}
@@ -42,8 +42,6 @@ extern "C" {
#define VIDEO_MEM_ADDR 0xB8000 // the video memora address
#define SMP_SETUP_ADDR 0x07000

#define SW_TASK_SWITCH

#define BYTE_ORDER LITTLE_ENDIAN

/*

4  include/metalsvm/config.inc.example (new file)
@@ -0,0 +1,4 @@
; config macros for the assembler code

; define the maximum number of core
%define MAX_CORES 1

@@ -110,8 +110,12 @@ void load_balancing(void);
/** @brief Task switcher
*
* Timer-interrupted use of this function for task switching
*
* @return
* - 0 no context switch
* - !0 address of the old stack pointer
*/
void scheduler(void);
size_t** scheduler(void);

/** @brief Wake up a blocked task
*

@@ -67,13 +67,12 @@ struct page_dir;

/** @brief The task_t structure */
typedef struct task {
#ifdef SW_TASK_SWITCH
uint32_t stack;
#endif
/// Task id = position in the task table
tid_t id;
/// Task status (INVALID, READY, RUNNING, ...)
uint32_t status;
/// copy of the stack pointer before a context switch
size_t* stack;
/// Additional status flags. For instance, to signalize the using of the FPU
uint8_t flags;
/// Task priority
@@ -86,29 +85,29 @@ typedef struct task {
struct task* prev;
/// last core id on which the task was running
uint32_t last_core;
/// Usage in number of pages
/// usage in number of pages
atomic_int32_t user_usage;
/// Avoids concurrent access to the page directory
/// avoids concurrent access to the page directory
spinlock_t pgd_lock;
/// pointer to the page directory
struct page_dir* pgd;
/// Lock for the VMA_list
/// lock for the VMA_list
spinlock_t vma_lock;
/// List of VMAs
/// list of VMAs
vma_t* vma_list;
/// Filedescriptor table
/// filedescriptor table
filp_t* fildes_table;
/// starting time/tick of the task
uint64_t start_tick;
/// Start address of the heap
/// start address of the heap
size_t start_heap;
/// End address of the heap
/// end address of the heap
size_t end_heap;
/// LwIP error code
int lwip_err;
/// Mail inbox
/// mail inbox
mailbox_wait_msg_t inbox;
/// Mail outbox array
/// mail outbox array
mailbox_wait_msg_t* outbox[MAX_TASKS];
/// FPU state
union fpu_state fpu;

@@ -253,7 +253,7 @@ int initd(void* arg)
#endif

// start echo, netio and rlogind
//echo_init();
echo_init();
create_user_task(&id, "/bin/rlogind", argv);
kprintf("Create rlogind with id %u\n", id);
//netio_init();

@@ -46,16 +46,9 @@
*
* A task's id will be its position in this array.
*/
#ifdef SW_TASK_SWITCH
static task_t task_table[MAX_TASKS] = { \
[0] = {0, 0, TASK_IDLE, 0, 0, 0, NULL, NULL, 0, ATOMIC_INIT(0), SPINLOCK_INIT, NULL, SPINLOCK_INIT, NULL, NULL, 0, 0, 0, 0}, \
[1 ... MAX_TASKS-1] = {0, 0, TASK_INVALID, 0, 0, 0, NULL, NULL, 0, ATOMIC_INIT(0), SPINLOCK_INIT, NULL, SPINLOCK_INIT, NULL, NULL, 0, 0, 0, 0}};
#else
static task_t task_table[MAX_TASKS] = { \
[0] = {0, TASK_IDLE, 0, 0, 0, NULL, NULL, 0, ATOMIC_INIT(0), SPINLOCK_INIT, NULL, SPINLOCK_INIT, NULL, NULL, 0, 0, 0, 0}, \
[1 ... MAX_TASKS-1] = {0, TASK_INVALID, 0, 0, 0, NULL, NULL, 0, ATOMIC_INIT(0), SPINLOCK_INIT, NULL, SPINLOCK_INIT, NULL, NULL, 0, 0, 0, 0}};
#endif

[0] = {0, TASK_IDLE, NULL, 0, 0, 0, NULL, NULL, 0, ATOMIC_INIT(0), SPINLOCK_INIT, NULL, SPINLOCK_INIT, NULL, NULL, 0, 0, 0, 0}, \
[1 ... MAX_TASKS-1] = {0, TASK_INVALID, NULL, 0, 0, 0, NULL, NULL, 0, ATOMIC_INIT(0), SPINLOCK_INIT, NULL, SPINLOCK_INIT, NULL, NULL, 0, 0, 0, 0}};
static spinlock_irqsave_t table_lock = SPINLOCK_IRQSAVE_INIT;
#if MAX_CORES > 1
static runqueue_t runqueues[MAX_CORES] = { \
@@ -126,7 +119,7 @@ size_t get_idle_task(uint32_t id)
#endif
}

static void finish_task_switch(uint32_t irq)
void finish_task_switch(void)
{
uint8_t prio;
uint32_t core_id = CORE_ID;
@@ -148,9 +141,6 @@ static void finish_task_switch(uint32_t irq)
runqueues[core_id].prio_bitmap |= (1 << prio);
}
spinlock_irqsave_unlock(&runqueues[core_id].lock);

if (irq)
irq_enable();
}

/** @brief Wakeup tasks which are waiting for a message from the current one
@@ -220,7 +210,7 @@ static void NORETURN do_exit(int arg) {
irq_nested_enable(flags);

reschedule();


kprintf("Kernel panic: scheduler on core %d found no valid task\n", CORE_ID);
while(1) {
HALT;
@@ -421,7 +411,6 @@ int sys_fork(void)
// Leave the function without releasing the locks
// because the locks are already released
// by the parent task!
finish_task_switch(1);
return 0;
}

@@ -456,8 +445,6 @@ static int STDCALL kernel_entry(void* args)
int ret;
kernel_args_t* kernel_args = (kernel_args_t*) args;

finish_task_switch(1);

if (BUILTIN_EXPECT(!kernel_args, 0))
return -EINVAL;

@@ -738,8 +725,6 @@ static int STDCALL user_entry(void* arg)
{
int ret;

finish_task_switch(1);

if (BUILTIN_EXPECT(!arg, 0))
return -EINVAL;

@@ -1273,7 +1258,7 @@ void load_balancing(void)
}
#endif

void scheduler(void)
size_t** scheduler(void)
{
task_t* orig_task;
task_t* curr_task;
@@ -1379,20 +1364,18 @@ get_task_out:
}

//kprintf("schedule from %u to %u with prio %u on core %u\n", orig_task->id, curr_task->id, (uint32_t)curr_task->prio, CORE_ID);
#ifndef SW_TASK_SWITCH
switch_task(curr_task->id);
#endif
finish_task_switch(0);
#ifdef SW_TASK_SWITCH
write_cr3(virt_to_phys((size_t)curr_task->pgd));
sw_switch_context(&orig_task->stack, &curr_task->stack);
#endif

return (size_t**) &(orig_task->stack);
}

return NULL;
}

void reschedule(void)
{
size_t** stack;
uint32_t flags = irq_nested_disable();
scheduler();
if ((stack = scheduler()))
switch_context(stack);
irq_nested_enable(flags);
}