Merge branch 'sw_switch'

This commit is contained in:
Stefan Lankes 2012-05-23 11:21:38 -07:00
commit 3d1facb657
11 changed files with 258 additions and 126 deletions

1
.gitignore vendored
View file

@ -12,6 +12,7 @@
tags
Makefile
include/metalsvm/config.h
include/metalsvm/config.inc
tools/make_initrd
newlib/examples/hello
newlib/examples/jacobi

View file

@ -1,4 +1,4 @@
TOPDIR := $(shell pwd)
TOPDIR = $(shell pwd)
ARCH = x86
NAME = metalsvm
LWIPDIRS = lwip/src/arch lwip/src/api lwip/src/core lwip/src/core/ipv4 lwip/src/netif
@ -31,14 +31,15 @@ READELF_FOR_TARGET = $(CROSSCOMPREFIX)readelf
MAKE = make
RM = rm -rf
NASM = nasm
QEMU = qemu-system-i386
EMU = qemu
GDB = gdb
NASMFLAGS = -felf32 -g
NASMFLAGS = -felf32 -g -i$(TOPDIR)/include/metalsvm/
INCLUDE = -I$(TOPDIR)/include -I$(TOPDIR)/arch/$(ARCH)/include -I$(TOPDIR)/lwip/src/include -I$(TOPDIR)/lwip/src/include/ipv4 -I$(TOPDIR)/drivers
# Compiler options for final code
CFLAGS = -g -m32 -march=i586 -Wall -O2 -fno-builtin -fstrength-reduce -fomit-frame-pointer -finline-functions -nostdinc $(INCLUDE) $(STACKPROT)
# Compiler options for debugging
# Compiler options for debuging
#CFLAGS = -g -O -m32 -march=i586 -Wall -fno-builtin -DWITH_FRAME_POINTER -nostdinc $(INCLUDE) $(STACKPROT)
ARFLAGS = rsv
LDFLAGS = -T link.ld -z max-page-size=4096 --defsym __BUILD_DATE=$(shell date +'%Y%m%d') --defsym __BUILD_TIME=$(shell date +'%H%M%S')
@ -69,6 +70,7 @@ newlib:
RANLIB_FOR_TARGET=$(RANLIB_FOR_TARGET) \
STRIP_FOR_TARGET=$(STRIP_FOR_TARGET) \
READELF_FOR_TARGET=$(READELF_FOR_TARGET) -C newlib
tools:
$(MAKE) -C tools
@ -80,10 +82,10 @@ $(NAME).elf:
$Q$(OBJCOPY_FOR_TARGET) $(STRIP_DEBUG) $(NAME).elf
qemu: newlib tools $(NAME).elf
qemu -monitor stdio -smp 2 -net nic,model=rtl8139 -net user,hostfwd=tcp::12345-:4711 -net dump -kernel metalsvm.elf -initrd tools/initrd.img
$(QEMU) -monitor stdio -smp 2 -net nic,model=rtl8139 -net user,hostfwd=tcp::12345-:4711 -net dump -kernel metalsvm.elf -initrd tools/initrd.img
qemudbg: newlib tools $(NAME).elf
qemu -S -s -smp 2 -net nic,model=rtl8139 -net user,hostfwd=tcp::12345-:4711 -net dump -kernel metalsvm.elf -initrd tools/initrd.img
$(QEMU) -s -S -smp 2 -net nic,model=rtl8139 -net user,hostfwd=tcp::12345-:4711 -net dump -kernel metalsvm.elf -initrd tools/initrd.img
gdb: $(NAME).elf
make qemudbg > /dev/null &
@ -105,10 +107,14 @@ veryclean: clean
@echo [CC] $@
$Q$(CC_FOR_TARGET) -c -D__KERNEL__ $(CFLAGS) -o $@ $<
@echo [DEP] $*.dep
$Q$(CPP_FOR_TARGET) -MF $*.dep -MT $*.o -MM $(CFLAGS) $<
$Q$(CPP_FOR_TARGET) -MF $*.dep -MT $*.o -MM -D__KERNEL__ $(CFLAGS) $<
include/metalsvm/config.inc: include/metalsvm/config.h
@echo "; This file is generated automatically from the config.h file." > include/metalsvm/config.inc
@echo "; Before editing this, you should consider editing config.h." >> include/metalsvm/config.inc
@awk '/^#define MAX_CORES/{ print "%define MAX_CORES", $$3 }' include/metalsvm/config.h >> include/metalsvm/config.inc
%.o : %.asm
%.o : %.asm include/metalsvm/config.inc
@echo [ASM] $@
$Q$(NASM) $(NASMFLAGS) -o $@ $<

View file

@ -413,6 +413,65 @@ static int pi(void* arg)
return 0;
}
// Number of measured switch round trips per benchmark task
#define REPS 10000
// Shared TSC samples (t1 appears unused in this hunk — TODO confirm)
volatile uint64_t t1, t2;
// Set to 1 when either benchmark task finishes; terminates both loops
volatile int stop = !!0;
// Id of the task that ran last; rewritten to hand control to the partner
volatile int sid = 0;
/** @brief Measure the gap (in TSC ticks) between two alternating kernel tasks.
 *
 * Two instances of this function are meant to run concurrently, one created
 * with arg = 0 and one with arg = 1 (see the commented-out
 * create_kernel_task() calls in test_init()). Each instance spins while it
 * is its own turn, continuously sampling the TSC into t2; once the partner
 * has run and handed back control (sid changed), the elapsed ticks since
 * the last sample — the gap spanning the task switches — are recorded.
 *
 * @param arg Task identity, normalized to 0 or 1 via !!(int)arg
 * @return Always 0
 */
static int measure_ctx_switch(void* arg)
{
int id = !!(int)arg;
int oid = !id; // partner's id (unused below — TODO confirm it can be removed)
uint64_t freq = get_cpu_frequency() *1000 *1000; // CPU clock in Hz (assumes get_cpu_frequency() returns MHz — TODO confirm)
uint64_t diff, min = (uint64_t)-1, max = 0, avg = 0;
int i;
uint32_t a=0,b,c,d; // scratch outputs for cpuid()
// Size of a timeslice in ticks
uint64_t timeslice = freq / TIMER_FREQ;
kprintf("ID: %d, ", id);
kprintf("Measuring SW task switching.\n");
for (i=0; i < REPS && stop == 0; i++) {
// Busy-wait while it is still our turn; keep refreshing the TSC sample
// so t2 holds the last tick taken before we are switched away.
while(id == sid && stop == 0) {
t2 = rdtsc();
cpuid(0,&a,&b,&c,&d); // cpuid used as a serializing barrier around rdtsc
}
cpuid(0,&a,&b,&c,&d);
diff = rdtsc() -t2; // ticks elapsed across the switch away and back
// The last measurement is garbage
if (stop) break;
// The first ones are garbage, too
if (i < 5) goto next_try;
// Discard samples of a full timeslice or more (we missed a turn)
// and redo this iteration.
if (diff >= timeslice) {
i--;
goto next_try;
}
kprintf("%i: diff= %llu, i= %i\n", id, diff, i);
if (diff > max) max = diff;
if (diff < min) min = diff;
avg += diff;
next_try:
sid = id; // hand control over to the partner task
}
avg /= i-5; // the first 5 iterations were skipped above
stop = 1; // tell the partner task to stop as well
kprintf("maximum gap: %llu ticks\n", max);
kprintf("minimum gap: %llu ticks\n", min);
kprintf("average gap: %llu ticks\n", avg);
kprintf("Timeslice size: %llu ticks\n", timeslice);
return 0;
}
int test_init(void)
{
// char* argv[] = {"/bin/mshell", NULL};
@ -424,8 +483,11 @@ int test_init(void)
//sem_init(&consuming, 0);
//mailbox_int32_init(&mbox);
//create_kernel_task(NULL, measure_ctx_switch, (int)0, NORMAL_PRIO);
//create_kernel_task(NULL, measure_ctx_switch, (int)1, NORMAL_PRIO);
create_kernel_task(NULL, foo, "Hello from foo1", NORMAL_PRIO);
create_kernel_task(NULL, join_test, NULL, NORMAL_PRIO);
//create_kernel_task(NULL, foo, "Hello from foo2", NORMAL_PRIO);
//create_kernel_task(NULL, join_test, NULL, NORMAL_PRIO);
//create_kernel_task(NULL, producer, , NORMAL_PRIO);
//create_kernel_task(NULL, consumer, NULL, NORMAL_PRIO);
//create_kernel_task(NULL, mail_ping, NULL, NORMAL_PRIO);

View file

@ -55,6 +55,13 @@ int arch_fork(task_t* task);
*/
void switch_task(uint32_t id);
/**
* @brief Switch to current task
*
* @param stack Pointer to the old stack pointer
*/
void switch_context(size_t** stack);
/** @brief Setup a default frame for a new task
*
* @param task Pointer to the task structure

View file

@ -21,6 +21,8 @@
; perhaps setting up the GDT and segments. Please note that interrupts
; are disabled at this point: More on interrupts later!
%include "config.inc"
[BITS 32]
; We use a special name to map this section at the beginning of our kernel
; => Multiboot needs its magic number at the beginning of the kernel
@ -496,17 +498,6 @@ global apic_lint1
global apic_error
global apic_svr
global switch_task
switch_task:
mov eax, [esp+4]
add ax, WORD 5
mov bx, WORD 8
mul bx
mov [hack+5], ax
hack:
jmp 0x00 : 0xDEADBEAF
ret
; 32: IRQ0
irq0:
; irq0 - irq15 are registered as "Interrupt Gate"
@ -764,7 +755,31 @@ apic_svr:
jmp common_stub
extern irq_handler
extern get_current_stack
extern finish_task_switch
extern apic_cpu_id
extern task_state_segments
global switch_context
ALIGN 4
; void switch_context(size_t** stack)
;
; Voluntary context switch: build a stack frame that looks exactly like the
; frame an interrupt entry would have produced, then reuse the interrupt
; switch/return path (common_switch, ending in iret) to change tasks.
; The argument — the address at which the old esp is to be stored — is
; loaded into eax; common_switch consumes it via "mov [eax], esp".
switch_context:
; create on the stack a pseudo interrupt
; afterwards, we switch to the task with iret
mov eax, [esp+4] ; on the stack is already the address to store the old esp
pushf ; EFLAGS
push DWORD 0x8 ; CS
push DWORD rollback ; EIP
push DWORD 0 ; Interrupt number
push DWORD 0xc0edbabe ; Error code (marker value for debugging)
pusha ; Registers...
jmp common_switch
ALIGN 4
; When the frame crafted above is eventually restored by iret, execution
; resumes here and simply returns to switch_context's original caller.
rollback:
ret
ALIGN 4
common_stub:
pusha
@ -773,6 +788,30 @@ common_stub:
call irq_handler
add esp, 4
cmp eax, 0
je no_context_switch
common_switch:
mov [eax], esp ; store old esp
call get_current_stack ; get new esp
xchg eax, esp
; determine TSS
%if MAX_CORES > 1
call apic_cpu_id
mov ecx, DWORD 0x68
mul ecx
%else
xor eax, eax
%endif
add eax, task_state_segments
; set esp0 in TSS
mov [eax+4], esp
; call cleanup code
call finish_task_switch
no_context_switch:
popa
add esp, 8
iret

View file

@ -27,7 +27,7 @@
#include <asm/page.h>
gdt_ptr_t gp;
static tss_t task_state_segments[MAX_TASKS] __attribute__ ((aligned (PAGE_SIZE)));
tss_t task_state_segments[MAX_CORES] __attribute__ ((aligned (PAGE_SIZE)));
static unsigned char kstacks[MAX_TASKS][KERNEL_STACK_SIZE] __attribute__ ((aligned (PAGE_SIZE))) = {[0 ... MAX_TASKS-1][0 ... KERNEL_STACK_SIZE-1] = 0xCD};
uint32_t default_stack_pointer = (uint32_t) kstacks[0] + KERNEL_STACK_SIZE - sizeof(size_t);
// currently, our kernel has full access to the ioports
@ -45,6 +45,15 @@ extern void gdt_flush(void);
*/
extern void tss_switch(uint32_t id);
/** @brief Fetch the saved stack pointer of the task being switched to.
 *
 * Called from the assembly context-switch path (see common_switch), which
 * installs the returned value as the new esp. As a side effect, this also
 * activates the task's address space by loading its page directory into CR3.
 *
 * @return The saved kernel stack pointer of the current task
 */
size_t* get_current_stack(void)
{
task_t* curr_task = per_core(current_task);
// switch to the address space of the task we are resuming
write_cr3(virt_to_phys((size_t)curr_task->pgd));
return curr_task->stack;
}
size_t get_stack(uint32_t id)
{
if (BUILTIN_EXPECT(id >= MAX_TASKS, 0))
@ -54,26 +63,18 @@ size_t get_stack(uint32_t id)
int register_task(task_t* task) {
uint16_t sel;
uint32_t id = task->id;
if (BUILTIN_EXPECT(!task, 0))
return -EINVAL;
sel = (task->id+5) << 3;
sel = (CORE_ID+5) << 3;
asm volatile ("mov %0, %%ax; ltr %%ax" : : "ir"(sel) : "%eax");
// initialize the static elements of a TSS
task_state_segments[id].cr3 = (uint32_t) (task->pgd);
task_state_segments[id].ss0 = 0x10;
return 0;
}
int arch_fork(task_t* task)
{
uint16_t cs = 0x08;
uint16_t ds = 0x10;
uint32_t id;
uint32_t id, esp;
struct state* state;
task_t* curr_task = per_core(current_task);
if (BUILTIN_EXPECT(!task, 0))
@ -83,43 +84,37 @@ int arch_fork(task_t* task)
// copy kernel stack of the current task
memcpy(kstacks[id], kstacks[curr_task->id], KERNEL_STACK_SIZE);
// reset TSS
memset(task_state_segments+id, 0x00, sizeof(tss_t));
asm volatile ("mov %%esp, %0" : "=r"(esp));
esp -= (uint32_t) kstacks[curr_task->id];
esp += (uint32_t) kstacks[id];
// set default values of all registers
task_state_segments[id].cs = cs;
task_state_segments[id].ss = ds;
task_state_segments[id].ds = ds;
task_state_segments[id].fs = ds;
task_state_segments[id].gs = ds;
task_state_segments[id].es = ds;
task_state_segments[id].cr3 = (uint32_t) (virt_to_phys((size_t)task->pgd));
task_state_segments[id].ss0 = ds;
task_state_segments[id].esp0 = (uint32_t) kstacks[id] + KERNEL_STACK_SIZE - sizeof(size_t);
state = (struct state*) (esp - sizeof(struct state) + 2*sizeof(size_t));
memset(state, 0x00, sizeof(struct state) - 2*sizeof(size_t));
// save current task context
asm volatile("mov %%esp, %0" : "=r"(task_state_segments[id].esp));
task_state_segments[id].esp -= (uint32_t) kstacks[curr_task->id];
task_state_segments[id].esp += (uint32_t) kstacks[id];
asm volatile ("pusha");
asm volatile ("pop %0" : "=r"(task_state_segments[id].edi));
asm volatile ("pop %0" : "=r"(task_state_segments[id].esi));
asm volatile ("pop %0" : "=r"(task_state_segments[id].ebp));
#ifdef WITH_FRAME_POINTER
task_state_segments[id].ebp -= (uint32_t) kstacks[curr_task->id];
task_state_segments[id].ebp += (uint32_t) kstacks[id];
#endif
asm volatile ("pusha; pop %0" : "=r"(state->edi));
asm volatile ("pop %0" : "=r"(state->esi));
asm volatile ("pop %0" : "=r"(state->ebp));
asm volatile ("add $4, %%esp" ::: "%esp");
asm volatile ("pop %0" : "=r"(task_state_segments[id].ebx));
asm volatile ("pop %0" : "=r"(task_state_segments[id].edx));
asm volatile ("pop %0" : "=r"(task_state_segments[id].ecx));
asm volatile ("pop %0" : "=r"(task_state_segments[id].eax));
asm volatile ("pop %0" : "=r"(state->ebx));
asm volatile ("pop %0" : "=r"(state->edx));
asm volatile ("pop %0" : "=r"(state->ecx));
asm volatile ("pop %0" : "=r"(state->eax));
#ifdef WITH_FRAME_POINTER
state->ebp -= (uint32_t) kstacks[curr_task->id];
state->ebp += (uint32_t) kstacks[id];
#endif
state->esp = (uint32_t) state;
task->stack = (size_t*) state;
state->int_no = 0xB16B00B5;
state->error = 0xC03DB4B3;
state->cs = cs;
// store the current EFLAGS
asm volatile ("pushf; pop %%eax" : "=a"(task_state_segments[id].eflags));
asm volatile ("pushf; pop %%eax" : "=a"(state->eflags));
// enable interrupts
state->eflags |= (1 << 9);
// This will be the entry point for the new task.
asm volatile ("call read_eip" : "=a"(task_state_segments[id].eip));
asm volatile ("call read_eip" : "=a"(state->eip));
return 0;
}
@ -127,40 +122,53 @@ int arch_fork(task_t* task)
int create_default_frame(task_t* task, internal_entry_point_t ep, void* arg)
{
uint16_t cs = 0x08;
uint16_t ds = 0x10;
uint32_t id;
uint32_t *stack;
struct state *stptr;
if (BUILTIN_EXPECT(!task, 0))
return -EINVAL;
id = task->id;
/* reset buffers */
memset(task_state_segments+id, 0x00, sizeof(tss_t));
memset(kstacks[id], 0xCD, KERNEL_STACK_SIZE);
/* set default values of all registers */
task_state_segments[id].cs = cs;
task_state_segments[id].ss = ds;
task_state_segments[id].ds = ds;
task_state_segments[id].fs = ds;
task_state_segments[id].gs = ds;
task_state_segments[id].es = ds;
task_state_segments[id].eflags = 0x1002; // 0x1202;
task_state_segments[id].cr3 = (uint32_t) (virt_to_phys((size_t)task->pgd));
task_state_segments[id].eip = (uint32_t) ep;
task_state_segments[id].esp = (uint32_t) kstacks[id] + KERNEL_STACK_SIZE - sizeof(size_t);
/* build default stack frame */
*((size_t*)task_state_segments[id].esp) = 0xDEADBEAF; /* dead-end */
task_state_segments[id].ebp = task_state_segments[id].esp;
task_state_segments[id].esp -= sizeof(size_t);
*((size_t*)task_state_segments[id].esp) = (size_t) arg;
task_state_segments[id].esp -= sizeof(size_t);
*((size_t*)task_state_segments[id].esp) = (size_t) leave_kernel_task;
/* The difference between setting up a task for SW-task-switching
* and not for HW-task-switching is setting up a stack and not a TSS.
* This is the stack which will be activated and popped off for iret later.
*/
stack = (uint32_t*) (kstacks[id] + KERNEL_STACK_SIZE - sizeof(uint32_t));
/* setup for the kernel stack frame */
task_state_segments[id].ss0 = 0x10;
task_state_segments[id].esp0 = (uint32_t) kstacks[id] + KERNEL_STACK_SIZE - sizeof(size_t);
/* The next three things on the stack are a marker for debugging purposes, ... */
*stack-- = 0xDEADBEEF;
/* the first-function-to-be-called's arguments, ... */
*stack-- = (size_t) arg;
/* and the "caller" we shall return to.
* This procedure cleans the task after exit. */
*stack = (size_t) leave_kernel_task;
/* Next bunch on the stack is the initial register state.
* The stack must look like the stack of a task which was
* scheduled away previously. */
stack = (uint32_t*) ((size_t) stack - sizeof(struct state) + 2*sizeof(size_t));
stptr = (struct state *) stack;
memset(stptr, 0x00, sizeof(struct state) - 2*sizeof(size_t));
stptr->esp = (size_t)stack + sizeof(struct state) - 2*sizeof(size_t);
stptr->int_no = 0xB16B00B5;
stptr->error = 0xC03DB4B3;
/* The instruction pointer shall be set on the first function to be called
* after IRETing */
stptr->eip = (uint32_t)ep;
stptr->cs = cs;
stptr->eflags = 0x1202;
// the creation of a kernel task doesn't change the IOPL level
// => useresp & ss is not required
/* Set the task's stack pointer entry to the stack we have crafted right now. */
task->stack = (size_t*)stack;
return 0;
}
@ -203,7 +211,7 @@ void gdt_install(void)
{
unsigned int i;
memset(task_state_segments, 0x00, MAX_TASKS*sizeof(tss_t));
memset(task_state_segments, 0x00, MAX_CORES*sizeof(tss_t));
/* Setup the GDT pointer and limit */
gp.limit = (sizeof(gdt_entry_t) * GDT_ENTRIES) - 1;
@ -247,10 +255,14 @@ void gdt_install(void)
/*
* Create TSS for each task at ring0 (we use these segments for task switching)
*/
for(i=0; i<MAX_TASKS; i++) {
for(i=0; i<MAX_CORES; i++) {
/* set default values */
task_state_segments[i].eflags = 0x1202;
task_state_segments[i].ss0 = 0x10; // data segment
task_state_segments[i].esp0 = 0xDEADBEEF; // invalid pseudo address
gdt_set_gate(5+i, (unsigned long) (task_state_segments+i), sizeof(tss_t)-1,
GDT_FLAG_PRESENT | GDT_FLAG_TSS | GDT_FLAG_RING0,
GDT_FLAG_32_BIT);
GDT_FLAG_PRESENT | GDT_FLAG_TSS | GDT_FLAG_RING0, GDT_FLAG_32_BIT);
}
/* Flush out the old GDT and install the new changes! */

View file

@ -29,6 +29,7 @@
#include <metalsvm/string.h>
#include <metalsvm/tasks.h>
#include <metalsvm/errno.h>
#include <metalsvm/page.h>
#include <asm/irq.h>
#include <asm/idt.h>
#include <asm/isrs.h>
@ -224,13 +225,15 @@ int irq_init(void)
* controller (an IRQ from 8 to 15) gets an interrupt, you need to
* acknowledge the interrupt at BOTH controllers, otherwise, you
* only send an EOI command to the first controller. If you don't send
* an EOI, it won't raise any more IRQs.\n
* \n
* an EOI, it won't raise any more IRQs.
*
* Note: If we enabled the APIC, we also disabled the PIC. Afterwards,
* we get no interrupts between 0 and 15.
*/
void irq_handler(struct state *s)
size_t** irq_handler(struct state *s)
{
size_t** ret = NULL;
/* This is a blank function pointer */
void (*handler) (struct state * s);
@ -276,7 +279,9 @@ void irq_handler(struct state *s)
leave_handler:
// timer interrupt?
if ((s->int_no == 32) || (s->int_no == 123))
scheduler(); // switch to a new task
ret = scheduler(); // switch to a new task
else if ((s->int_no >= 32) && (get_highest_priority() > per_core(current_task)->prio))
scheduler();
ret = scheduler();
return ret;
}

View file

@ -110,8 +110,12 @@ void load_balancing(void);
/** @brief Task switcher
*
* Timer-interrupted use of this function for task switching
*
* @return
* - 0 no context switch
* - !0 address of the old stack pointer
*/
void scheduler(void);
size_t** scheduler(void);
/** @brief Wake up a blocked task
*

View file

@ -71,6 +71,8 @@ typedef struct task {
tid_t id;
/// Task status (INVALID, READY, RUNNING, ...)
uint32_t status;
/// copy of the stack pointer before a context switch
size_t* stack;
/// Additional status flags. For instance, to signalize the using of the FPU
uint8_t flags;
/// Task priority
@ -83,29 +85,29 @@ typedef struct task {
struct task* prev;
/// last core id on which the task was running
uint32_t last_core;
/// Usage in number of pages
/// usage in number of pages
atomic_int32_t user_usage;
/// Avoids concurrent access to the page directory
/// avoids concurrent access to the page directory
spinlock_t pgd_lock;
/// pointer to the page directory
struct page_dir* pgd;
/// Lock for the VMA_list
/// lock for the VMA_list
spinlock_t vma_lock;
/// List of VMAs
/// list of VMAs
vma_t* vma_list;
/// Filedescriptor table
/// filedescriptor table
filp_t* fildes_table;
/// starting time/tick of the task
uint64_t start_tick;
/// Start address of the heap
/// start address of the heap
size_t start_heap;
/// End address of the heap
/// end address of the heap
size_t end_heap;
/// LwIP error code
int lwip_err;
/// Mail inbox
/// mail inbox
mailbox_wait_msg_t inbox;
/// Mail outbox array
/// mail outbox array
mailbox_wait_msg_t* outbox[MAX_TASKS];
/// FPU state
union fpu_state fpu;

View file

@ -253,7 +253,7 @@ int initd(void* arg)
#endif
// start echo, netio and rlogind
//echo_init();
echo_init();
create_user_task(&id, "/bin/rlogind", argv);
kprintf("Create rlogind with id %u\n", id);
//netio_init();

View file

@ -47,9 +47,8 @@
* A task's id will be its position in this array.
*/
static task_t task_table[MAX_TASKS] = { \
[0] = {0, TASK_IDLE, 0, 0, 0, NULL, NULL, 0, ATOMIC_INIT(0), SPINLOCK_INIT, NULL, SPINLOCK_INIT, NULL, NULL, 0, 0, 0, 0}, \
[1 ... MAX_TASKS-1] = {0, TASK_INVALID, 0, 0, 0, NULL, NULL, 0, ATOMIC_INIT(0), SPINLOCK_INIT, NULL, SPINLOCK_INIT, NULL, NULL, 0, 0, 0, 0}};
[0] = {0, TASK_IDLE, NULL, 0, 0, 0, NULL, NULL, 0, ATOMIC_INIT(0), SPINLOCK_INIT, NULL, SPINLOCK_INIT, NULL, NULL, 0, 0, 0, 0}, \
[1 ... MAX_TASKS-1] = {0, TASK_INVALID, NULL, 0, 0, 0, NULL, NULL, 0, ATOMIC_INIT(0), SPINLOCK_INIT, NULL, SPINLOCK_INIT, NULL, NULL, 0, 0, 0, 0}};
static spinlock_irqsave_t table_lock = SPINLOCK_IRQSAVE_INIT;
#if MAX_CORES > 1
static runqueue_t runqueues[MAX_CORES] = { \
@ -120,7 +119,7 @@ size_t get_idle_task(uint32_t id)
#endif
}
static void finish_task_switch(uint32_t irq)
void finish_task_switch(void)
{
uint8_t prio;
uint32_t core_id = CORE_ID;
@ -142,9 +141,6 @@ static void finish_task_switch(uint32_t irq)
runqueues[core_id].prio_bitmap |= (1 << prio);
}
spinlock_irqsave_unlock(&runqueues[core_id].lock);
if (irq)
irq_enable();
}
/** @brief Wakeup tasks which are waiting for a message from the current one
@ -214,7 +210,7 @@ static void NORETURN do_exit(int arg) {
irq_nested_enable(flags);
reschedule();
kprintf("Kernel panic: scheduler on core %d found no valid task\n", CORE_ID);
while(1) {
HALT;
@ -415,7 +411,6 @@ int sys_fork(void)
// Leave the function without releasing the locks
// because the locks are already released
// by the parent task!
finish_task_switch(1);
return 0;
}
@ -450,8 +445,6 @@ static int STDCALL kernel_entry(void* args)
int ret;
kernel_args_t* kernel_args = (kernel_args_t*) args;
finish_task_switch(1);
if (BUILTIN_EXPECT(!kernel_args, 0))
return -EINVAL;
@ -732,8 +725,6 @@ static int STDCALL user_entry(void* arg)
{
int ret;
finish_task_switch(1);
if (BUILTIN_EXPECT(!arg, 0))
return -EINVAL;
@ -1267,7 +1258,7 @@ void load_balancing(void)
}
#endif
void scheduler(void)
size_t** scheduler(void)
{
task_t* orig_task;
task_t* curr_task;
@ -1372,16 +1363,19 @@ get_task_out:
orig_task->flags &= ~TASK_FPU_USED;
}
//kprintf("schedule from %u to %u with prio %u on core %u\n",
// orig_task->id, curr_task->id, (uint32_t)curr_task->prio, CORE_ID);
switch_task(curr_task->id);
finish_task_switch(0);
//kprintf("schedule from %u to %u with prio %u on core %u\n", orig_task->id, curr_task->id, (uint32_t)curr_task->prio, CORE_ID);
return (size_t**) &(orig_task->stack);
}
return NULL;
}
void reschedule(void)
{
size_t** stack;
uint32_t flags = irq_nested_disable();
scheduler();
if ((stack = scheduler()))
switch_context(stack);
irq_nested_enable(flags);
}