Merge branch 'libsocket'
commit 9631d861c3
15 changed files with 615 additions and 156 deletions
@@ -272,6 +272,22 @@ static inline uint32_t read_eflags(void)
 	return result;
 }
 
+/** @brief Search for the last (most significant) set bit
+ *
+ * @param i source operand
+ * @return index of the last set bit in the source operand (0 if no bit is set)
+ */
+static inline uint32_t last_set(uint32_t i)
+{
+	uint32_t ret;
+
+	if (!i)
+		return 0;
+	asm volatile ("bsr %1, %0" : "=r"(ret) : "r"(i) : "cc");
+
+	return ret;
+}
+
 /** @brief Read extended instruction pointer
  * @return The EIP's value
  */
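last_set() relies on bsr, which is undefined for a zero operand, hence the explicit zero check. For reference, here is a portable model of the same contract (my sketch, not part of the commit), which is handy for testing the scheduler's bitmap logic off-target:

#include <assert.h>
#include <stdint.h>

/* Portable model of last_set(): index of the most significant set bit,
 * 0 if no bit is set. Note the ambiguity last_set(0) == last_set(1) == 0;
 * the scheduler sidesteps it by never using bit 0 of prio_bitmap. */
static uint32_t last_set_portable(uint32_t i)
{
	uint32_t ret = 0;

	if (!i)
		return 0;
	while (i >>= 1)
		ret++;
	return ret;
}

int main(void)
{
	assert(last_set_portable(0x00000000) == 0);
	assert(last_set_portable(0x00000100) == 8);   /* one NORMAL_PRIO task */
	assert(last_set_portable(0x80000100) == 31);  /* REALTIME_PRIO wins   */
	return 0;
}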
@@ -60,7 +60,7 @@ static uint32_t ncores = 1;
 static uint8_t irq_redirect[16] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0xA, 0xB, 0xC, 0xD, 0xE, 0xF};
 #if MAX_CORES > 1
 static uint8_t boot_code[] = { 0xFA, 0x0F, 0x01, 0x16, 0x3B, 0x70, 0x0F, 0x20, 0xC0, 0x0C, 0x01, 0x0F, 0x22, 0xC0, 0x66, 0xEA, 0x16, 0x70, 0x00, 0x00, 0x08, 0x00, 0x31, 0xC0, 0x66, 0xB8, 0x10, 0x00, 0x8E, 0xD8, 0x8E, 0xC0, 0x8E, 0xE0, 0x8E, 0xE8, 0x8E, 0xD0, 0xBC, 0xEF, 0xBE, 0xAD, 0xDE, 0x68, 0xAD, 0xDE, 0xAD, 0xDE, 0x6A, 0x00, 0xEA, 0xDE, 0xC0, 0xAD, 0xDE, 0x08, 0x00, 0xEB, 0xFE, 0x17, 0x00, 0x41, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x9A, 0xCF, 0x00, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x92, 0xCF, 0x00};
-static atomic_int32_t cpu_online = ATOMIC_INIT(1);
+atomic_int32_t cpu_online = ATOMIC_INIT(1);
 #endif
 static uint8_t initialized = 0;
 spinlock_t bootlock = SPINLOCK_INIT;
@@ -36,6 +36,10 @@
  */
 static volatile uint64_t timer_ticks = 0;
 
+#if MAX_CORES > 1
+extern atomic_int32_t cpu_online;
+#endif
+
 uint64_t get_clock_tick(void)
 {
 	return timer_ticks;
@@ -61,8 +65,6 @@ int sys_times(struct tms* buffer, clock_t* clock)
  */
 static void timer_handler(struct state *s)
 {
 	uint32_t i;
 
 	/* Increment our 'tick counter' */
 #if MAX_CORES > 1
 	if (smp_id() == 0)
@@ -78,6 +80,13 @@ static void timer_handler(struct state *s)
 			vga_puts("One second has passed\n");
 		}*/
 	}
 
+	update_load();
+
+#if MAX_CORES > 1
+	if ((atomic_int32_read(&cpu_online) > 1) && (timer_ticks % (TIMER_FREQ/5) == 0))
+		load_balancing();
+#endif
 }
 
 int timer_wait(unsigned int ticks)
@@ -104,12 +113,7 @@ int timer_wait(unsigned int ticks)
 		check_workqueues();
 
 		if (timer_ticks < eticks) {
-			uint32_t flags = irq_nested_disable();
-			curr_task->timeout = eticks;
-			curr_task->flags |= TASK_TIMER_USED;
-			curr_task->status = TASK_BLOCKED;
-			irq_nested_enable(flags);
 
+			set_timer(eticks);
 			reschedule();
 		}
 	}
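Blocking on a timeout is now centralized in set_timer(): the caller is dequeued, stamped with its deadline, and sorted into the per-core timer list, and the scheduler wakes it once timer_ticks passes the deadline. A hedged usage sketch in kernel context (msleep is my invented helper; it assumes TIMER_FREQ is the tick rate in Hz, as the diff suggests):

/* Sketch: sleep roughly `ms` milliseconds by blocking on the tick counter.
 * timer_wait() is the kernel function shown in this hunk. */
static int msleep(unsigned int ms)
{
	return timer_wait((ms * TIMER_FREQ) / 1000);
}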
@@ -612,7 +612,9 @@ static void pagefault_handler(struct state *s)
 	page_table_t* pgt = NULL;
 	size_t viraddr = read_cr2();
 	size_t phyaddr;
+#ifdef CONFIG_ROCKCREEK
+	uint32_t index1, index2;
+#endif
 
 	if ((viraddr >= task->start_heap) && (viraddr <= task->end_heap) && (viraddr > KERNEL_SPACE)) {
 		viraddr = viraddr & 0xFFFFF000;

@@ -630,7 +632,7 @@ static void pagefault_handler(struct state *s)
 			put_page(phyaddr);
 	}
 
 #ifdef CONFIG_ROCKCREEK
 	// does our SVM system need to handle this page fault?
 	index1 = viraddr >> 22;
 	index2 = (viraddr >> 12) & 0x3FF;

@@ -642,12 +644,14 @@ static void pagefault_handler(struct state *s)
+	if (pgt->entries[index2] & PG_SVM_STRONG)
 		if (!svm_access_request(viraddr))
 			return;
 #endif
 
 default_handler:
 	kprintf("PAGE FAULT: Task %u got page fault at %p (irq %d, cs:eip 0x%x:0x%x)\n", task->id, viraddr, s->int_no, s->cs, s->eip);
 	kprintf("Register state: eax = 0x%x, ebx = 0x%x, ecx = 0x%x, edx = 0x%x, edi = 0x%x, esi = 0x%x, ebp = 0x%x, esp = 0x%x\n",
 		s->eax, s->ebx, s->ecx, s->edx, s->edi, s->esi, s->ebp, s->esp);
 
 	while(1);
 	irq_enable();
 	abort();
 }
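The SVM path computes the two-level page-table indices by hand. For a 32-bit x86 address with 4 KiB pages, the top 10 bits select the page-directory entry and the next 10 bits the page-table entry; a standalone demonstration of the arithmetic (my example, not from the commit):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t viraddr = 0x40123ABC;              /* example fault address */
	uint32_t aligned = viraddr & 0xFFFFF000;    /* 4 KiB page base       */
	uint32_t index1  = viraddr >> 22;           /* page directory index  */
	uint32_t index2  = (viraddr >> 12) & 0x3FF; /* page table index      */

	printf("page 0x%08x -> pgd[%u], pgt[%u]\n", aligned, index1, index2);
	return 0;  /* prints: page 0x40123000 -> pgd[256], pgt[291] */
}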
@@ -76,11 +76,11 @@ typedef struct block_list {
 } block_list_t;
 
 typedef struct vfs_node {
 	/// The permissions mask.
 	uint32_t mask;
 	/// The owning user.
 	uint32_t uid;
 	/// The owning group.
 	uint32_t gid;
 	/// Includes the node type. See #defines above.
 	uint32_t type;
@@ -124,7 +124,7 @@ next_try1:
 	} else {
 		s->queue[s->pos] = curr_task->id;
 		s->pos = (s->pos + 1) % MAX_TASKS;
-		curr_task->status = TASK_BLOCKED;
+		block_current_task();
 		spinlock_irqsave_unlock(&s->lock);
 		reschedule();
 		NOP2;

@@ -152,11 +152,10 @@ next_try2:
 		}
 		s->queue[s->pos] = curr_task->id;
 		s->pos = (s->pos + 1) % MAX_TASKS;
-		curr_task->timeout = deadline;
-		curr_task->flags |= TASK_TIMER_USED;
-		curr_task->status = TASK_BLOCKED;
+		set_timer(deadline);
 		spinlock_irqsave_unlock(&s->lock);
 		reschedule();
 		NOP2;
 		goto next_try2;
 	}
 }
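Both semaphore paths follow the same idiom: enqueue the caller, block it (now via block_current_task() or set_timer() so the runqueue bookkeeping stays consistent), drop the lock, reschedule, and retry the acquisition after wakeup. A hedged sketch of the idiom in kernel context (sem_t's value field and the helper name are invented for illustration):

/* Sketch of the block-and-retry idiom used above (illustrative only). */
int sem_wait_sketch(sem_t* s)
{
next_try:
	spinlock_irqsave_lock(&s->lock);
	if (s->value > 0) {                    /* fast path: semaphore free  */
		s->value--;
		spinlock_irqsave_unlock(&s->lock);
		return 0;
	}
	s->queue[s->pos] = per_core(current_task)->id; /* remember the waiter */
	s->pos = (s->pos + 1) % MAX_TASKS;
	block_current_task();                  /* dequeue from the runqueue  */
	spinlock_irqsave_unlock(&s->lock);     /* release before switching   */
	reschedule();                          /* woken via wakeup_task()    */
	goto next_try;                         /* re-check: wakeups may race */
}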
@@ -65,7 +65,7 @@ int multitasking_init(void);
  * - 0 on success
  * - -EINVAL (-22) on failure
  */
-int create_kernel_task(tid_t* id, entry_point_t ep, void* arg);
+int create_kernel_task(tid_t* id, entry_point_t ep, void* arg, uint8_t prio);
 
 /** @brief Create a user level task.
  *
@@ -85,6 +85,22 @@ int create_user_task(tid_t* id, const char* fname, char** argv);
  */
 tid_t wait(int32_t* result);
 
+/** @brief Update the load of the current core
+ *
+ * This function is called from the timer interrupt
+ * and updates the load of the current core.
+ */
+void update_load(void);
+
+#if MAX_CORES > 1
+/** @brief Load balancer
+ *
+ * This load balancer is called from the timer interrupt
+ * and steals tasks from other cores.
+ */
+void load_balancing(void);
+#endif
 
 /** @brief Task switcher
  *
  * The timer interrupt uses this function for task switching. */
@@ -100,6 +116,25 @@ void scheduler(void);
  */
 int wakeup_task(tid_t);
 
+/** @brief Block the current task
+ *
+ * The current task's status will be changed to TASK_BLOCKED.
+ *
+ * @return
+ * - 0 on success
+ * - -EINVAL (-22) on failure
+ */
+int block_current_task(void);
+
+/** @brief Block the current task until the timer expires
+ *
+ * @param deadline Clock tick at which the timer expires
+ * @return
+ * - 0 on success
+ * - -EINVAL (-22) on failure
+ */
+int set_timer(uint64_t deadline);
+
 /** @brief Abort current task */
 void NORETURN abort(void);
@@ -40,6 +40,13 @@
 extern "C" {
 #endif
 
+#define MAX_PRIO	31
+#define REALTIME_PRIO	31
+#define HIGH_PRIO	16
+#define NORMAL_PRIO	8
+#define LOW_PRIO	1
+#define IDLE_PRIO	0
+
 #define TASK_INVALID	0
 #define TASK_READY	1
 #define TASK_RUNNING	2

@@ -50,8 +57,6 @@ extern "C" {
 #define TASK_DEFAULT_FLAGS	0
 #define TASK_FPU_INIT		(1 << 0)
 #define TASK_FPU_USED		(1 << 1)
-#define TASK_TIMER_USED		(1 << 2)
-#define TASK_SWITCH_IN_PROGRESS	(1 << 3)
 
 typedef int (*entry_point_t)(void*);
 typedef int (STDCALL *internal_entry_point_t)(void*);
|
|||
/// Task status (INVALID, READY, RUNNING, ...)
|
||||
uint32_t status;
|
||||
/// Additional status flags. For instance, to signalize the using of the FPU
|
||||
uint32_t flags;
|
||||
/// Number of used time slices
|
||||
uint32_t time_slices;
|
||||
uint8_t flags;
|
||||
/// Task priority
|
||||
uint8_t prio;
|
||||
/// timeout for a blocked task
|
||||
uint64_t timeout;
|
||||
/// next task in the queue
|
||||
struct task* next;
|
||||
/// previous task in the queue
|
||||
struct task* prev;
|
||||
/// last core id on which the task was running
|
||||
uint32_t last_core;
|
||||
/// Usage in number of pages
|
||||
atomic_int32_t user_usage;
|
||||
/// Avoids concurrent access to the page directory
|
||||
|
@@ -82,9 +93,9 @@ typedef struct task {
 	/// starting time/tick of the task
 	uint64_t start_tick;
 	/// Start address of the heap
-	uint32_t start_heap;
+	size_t start_heap;
 	/// End address of the heap
-	uint32_t end_heap;
+	size_t end_heap;
 	/// LwIP error code
 	int lwip_err;
 	/// Mail inbox
@@ -95,6 +106,34 @@ typedef struct task {
 	union fpu_state fpu;
 } task_t;
 
+typedef struct {
+	task_t* first;
+	task_t* last;
+} task_list_t;
+
+typedef struct {
+	/// idle task
+	task_t* idle __attribute__ ((aligned (CACHE_LINE)));
+	/// previous task
+	task_t* old_task;
+	/// total number of tasks in the queue
+	uint32_t nr_tasks;
+	/// current load = average number of tasks in the queue (1-minute average)
+	uint32_t load;
+	/// helper counter to determine the CPU load
+	int32_t load_counter;
+	/// helper counter to avoid "over balancing"
+	int32_t balance_counter;
+	/// indicates the used priority queues
+	uint32_t prio_bitmap;
+	/// a queue for each priority
+	task_list_t queue[MAX_PRIO];
+	/// a queue for timers
+	task_list_t timers;
+	/// lock for this runqueue
+	spinlock_t lock;
+} runqueue_t;
+
 #ifdef __cplusplus
 }
 #endif
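prio_bitmap plus the per-priority lists are what make the new scheduler's pick constant-time: bit p is set exactly when queue[p-1] is non-empty, so the highest ready priority falls out of one bsr. A minimal host-side sketch of that selection (simplified: one singly-linked FIFO per priority, no locking):

#include <stddef.h>
#include <stdint.h>

typedef struct node { struct node* next; } node_t;

typedef struct {
	uint32_t prio_bitmap;      /* bit p set <=> queue[p-1] non-empty */
	node_t*  queue[31];        /* one FIFO head per priority 1..31   */
} mini_runqueue_t;

/* Pop one task of the highest ready priority, or NULL if all are idle. */
static node_t* pick_next(mini_runqueue_t* rq)
{
	uint32_t prio;
	node_t* n;

	if (!rq->prio_bitmap)
		return NULL;                      /* only the idle task runs */
	prio = 31 - (uint32_t)__builtin_clz(rq->prio_bitmap); /* ~last_set() */
	n = rq->queue[prio - 1];
	rq->queue[prio - 1] = n->next;
	if (!rq->queue[prio - 1])
		rq->prio_bitmap &= ~(1u << prio); /* queue drained: clear bit */
	return n;
}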
@@ -42,7 +42,7 @@ int cli_ConnectTo(Client* cli,char * pAdresse,unsigned short Port,int webAdresse
 	if (connect(cli->sSocket,(const struct sockaddr*)&cli->adAddr, sizeof(cli->adAddr))==0)
 	{
-		create_kernel_task(&cli->bThread,cli_WaitForPacket,cli);
+		create_kernel_task(&cli->bThread,cli_WaitForPacket,cli, NORMAL_PRIO);
 
 		if (cli->_OnConnect != 0)
 		{
@@ -91,8 +91,7 @@ int main(void)
 	kprintf("Current available memory: %u MBytes\n", atomic_int32_read(&total_available_pages)/((1024*1024)/PAGE_SIZE));
 
 	sleep(5);
-	create_kernel_task(NULL, initd, NULL);
 	per_core(current_task)->time_slices = 0; // reset the number of time slices
+	create_kernel_task(NULL, initd, NULL, NORMAL_PRIO);
 	reschedule();
 
 	while(1) {
@@ -78,7 +78,7 @@ void* srv_WaitForConnection(Server* srv)
 			t = (ServerThreadArgs*) kmalloc(sizeof(ServerThreadArgs));
 			t->ID = i;
 			t->srv = srv;
-			create_kernel_task(&srv->bThreads[i],srv_WaitForPacket,t);
+			create_kernel_task(&srv->bThreads[i],srv_WaitForPacket,t, NORMAL_PRIO);
 
 			break;
 		}
@@ -175,7 +175,7 @@ int server_init(Server* srv, unsigned short Port, unsigned int dwMaxConnections)
 	bind( srv->sSocket,(const struct sockaddr *) &srv->adAddr, sizeof(srv->adAddr));	// bind the server to the address
 	listen(srv->sSocket,srv->dwMaximumConnections);	// put the server into the listening state
 
-	create_kernel_task(&srv->bThread_listen,srv_WaitForConnection,srv);
+	create_kernel_task(&srv->bThread_listen,srv_WaitForConnection,srv, NORMAL_PRIO);
 	// sConnections[0] = accept(sSocket,(struct sockaddr*)&tmpAddr,&tmpAddrLen);
 	// t.ID = 0;
 	// bthread_create(&bThreads[0],NULL,(start_routine) srv_WaitForPacket,&t);
kernel/tasks.c
@@ -47,14 +47,19 @@
  * A task's id will be its position in this array.
  */
 static task_t task_table[MAX_TASKS] = { \
-	[0] = {0, TASK_IDLE, 0, 0, 0, ATOMIC_INIT(0), SPINLOCK_INIT, NULL, SPINLOCK_INIT, NULL, 0, 0, 0, 0}, \
-	[1 ... MAX_TASKS-1] = {0, TASK_INVALID, 0, 0, 0, ATOMIC_INIT(0), SPINLOCK_INIT, NULL, SPINLOCK_INIT, NULL, 0, 0, 0, 0}};
+	[0] = {0, TASK_IDLE, 0, 0, 0, NULL, NULL, 0, ATOMIC_INIT(0), SPINLOCK_INIT, NULL, SPINLOCK_INIT, NULL, 0, 0, 0, 0}, \
+	[1 ... MAX_TASKS-1] = {0, TASK_INVALID, 0, 0, 0, NULL, NULL, 0, ATOMIC_INIT(0), SPINLOCK_INIT, NULL, SPINLOCK_INIT, NULL, 0, 0, 0, 0}};
 static spinlock_irqsave_t table_lock = SPINLOCK_IRQSAVE_INIT;
+#if MAX_CORES > 1
+static runqueue_t runqueues[MAX_CORES] = { \
+	[0] = {task_table+0, NULL, 0, 0, 0, 0, 0, {[0 ... MAX_PRIO-1] = {NULL, NULL}}, {NULL, NULL}, SPINLOCK_INIT}, \
+	[1 ... MAX_CORES-1] = {NULL, NULL, 0, 0, 0, 0, 0, {[0 ... MAX_PRIO-1] = {NULL, NULL}}, {NULL, NULL}, SPINLOCK_INIT}};
+#else
+static runqueue_t runqueues[1] = { \
+	[0] = {task_table+0, NULL, 0, 0, 0, 0, 0, {[0 ... MAX_PRIO-1] = {NULL, NULL}}, {NULL, NULL}, SPINLOCK_INIT}};
+#endif
 
 DEFINE_PER_CORE(task_t*, current_task, task_table+0);
 #if MAX_CORES > 1
-DEFINE_PER_CORE_STATIC(task_t*, old_task, NULL);
 #endif
 
 /** @brief helper function for the assembly code to determine the current task
  * @return Pointer to the task_t structure of current task
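The [0 ... N-1] forms in these tables are GCC's ranged designated initializers, which keep the static arrays readable; a standalone illustration (my example, not from the commit):

#include <stdio.h>

/* GCC extension: a ranged designator fills a whole index range at once. */
static int prio_default[32] = { [0] = 0, [1 ... 31] = 8 };

int main(void)
{
	printf("%d %d %d\n", prio_default[0], prio_default[1], prio_default[31]);
	return 0;  /* prints: 0 8 8 */
}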
@@ -63,24 +68,6 @@ task_t* get_current_task(void) {
 	return per_core(current_task);
 }
 
-int dump_scheduling_statistics(void)
-{
-	uint32_t i;
-	uint32_t id = 0;
-
-	kprintf("Scheduling statistics:\n");
-	kprintf("======================\n");
-	kprintf("total ticks:\t%llu\n", get_clock_tick());
-	for(i=0; i<MAX_CORES; i++) {
-		if (task_table[i].status == TASK_IDLE) {
-			kprintf("core %d :\t%u idle slices\n", id, task_table[i].time_slices);
-			id++;
-		}
-	}
-
-	return 0;
-}
-
 int multitasking_init(void) {
 	if (BUILTIN_EXPECT(task_table[0].status != TASK_IDLE, 0)) {
 		kputs("Task 0 is not an idle task\n");
@@ -91,6 +78,7 @@ int multitasking_init(void) {
 	memset(task_table[0].outbox, 0x00, sizeof(mailbox_wait_msg_t*)*MAX_TASKS);
 	task_table[0].pgd = get_boot_pgd();
 	task_table[0].flags = TASK_DEFAULT_FLAGS;
+	task_table[0].prio = IDLE_PRIO;
 
 	return 0;
 }
@@ -103,13 +91,15 @@ size_t get_idle_task(uint32_t id)
 
 	task_table[id].id = id;
 	task_table[id].status = TASK_IDLE;
+	task_table[id].prio = IDLE_PRIO;
 	task_table[id].flags = TASK_DEFAULT_FLAGS;
 	task_table[id].time_slices = 0;
+	task_table[id].last_core = id;
 	atomic_int32_set(&task_table[id].user_usage, 0);
 	mailbox_wait_msg_init(&task_table[id].inbox);
 	memset(task_table[id].outbox, 0x00, sizeof(mailbox_wait_msg_t*)*MAX_TASKS);
 	task_table[id].pgd = get_boot_pgd();
 	current_task[id].var = task_table+id;
+	runqueues[id].idle = task_table+id;
 
 	return get_stack(id);
 #else
@@ -117,6 +107,32 @@ size_t get_idle_task(uint32_t id)
 #endif
 }
 
+static void finish_task_switch(void)
+{
+	uint8_t prio;
+	uint32_t core_id = CORE_ID;
+	task_t* old;
+
+	spinlock_lock(&runqueues[core_id].lock);
+	if ((old = runqueues[core_id].old_task) != NULL) {
+		prio = old->prio;
+		if (!runqueues[core_id].queue[prio-1].first) {
+			old->prev = NULL;
+			runqueues[core_id].queue[prio-1].first = runqueues[core_id].queue[prio-1].last = old;
+		} else {
+			old->prev = runqueues[core_id].queue[prio-1].last;
+			runqueues[core_id].queue[prio-1].last->next = old;
+			runqueues[core_id].queue[prio-1].last = old;
+		}
+		runqueues[core_id].old_task = NULL;
+		runqueues[core_id].prio_bitmap |= (1 << prio);
+		old->next = NULL;
+	}
+	spinlock_unlock(&runqueues[core_id].lock);
+
+	irq_enable();
+}
+
 /** @brief Wakeup tasks which are waiting for a message from the current one
  *
  * @param result Current task's resulting return value
@@ -145,6 +161,7 @@ static void wakeup_blocked_tasks(int result)
 static void NORETURN do_exit(int arg) {
 	vma_t* tmp;
 	task_t* curr_task = per_core(current_task);
+	uint32_t flags, core_id;
 
 	kprintf("Terminate task: %u, return value %d\n", curr_task->id, arg);
@@ -168,6 +185,15 @@ static void NORETURN do_exit(int arg) {
 		kprintf("Memory leak! Task %d did not release %d pages\n",
 		        curr_task->id, atomic_int32_read(&curr_task->user_usage));
 	curr_task->status = TASK_FINISHED;
 
+	// decrease the number of active tasks
+	flags = irq_nested_disable();
+	core_id = CORE_ID;
+	spinlock_lock(&runqueues[core_id].lock);
+	runqueues[core_id].nr_tasks--;
+	spinlock_unlock(&runqueues[core_id].lock);
+	irq_nested_enable(flags);
+
 	reschedule();
 
 	kprintf("Kernel panic: scheduler on core %d found no valid task\n", CORE_ID);
@@ -203,17 +229,22 @@ void NORETURN abort(void) {
  * - 0 on success
  * - -ENOMEM (-12) or -EINVAL (-22) on failure
  */
-static int create_task(tid_t* id, internal_entry_point_t ep, void* arg)
+static int create_task(tid_t* id, internal_entry_point_t ep, void* arg, uint8_t prio)
 {
 	task_t* curr_task;
 	int ret = -ENOMEM;
-	unsigned int i;
+	unsigned int i, core_id;
 
 	if (BUILTIN_EXPECT(!ep, 0))
 		return -EINVAL;
+	if (BUILTIN_EXPECT(prio == IDLE_PRIO, 0))
+		return -EINVAL;
+	if (BUILTIN_EXPECT(prio > MAX_PRIO, 0))
+		return -EINVAL;
 
 	spinlock_irqsave_lock(&table_lock);
 
+	core_id = CORE_ID;
 	curr_task = per_core(current_task);
 
 	for(i=0; i<MAX_TASKS; i++) {
@@ -229,7 +260,8 @@ static int create_task(tid_t* id, internal_entry_point_t ep, void* arg)
 			task_table[i].id = i;
 			task_table[i].status = TASK_READY;
 			task_table[i].flags = TASK_DEFAULT_FLAGS;
 			task_table[i].time_slices = 0;
+			task_table[i].prio = prio;
 			task_table[i].last_core = 0;
 			spinlock_init(&task_table[i].vma_lock);
 			task_table[i].vma_list = NULL;
 			mailbox_wait_msg_init(&task_table[i].inbox);
@@ -245,6 +277,23 @@ static int create_task(tid_t* id, internal_entry_point_t ep, void* arg)
 			task_table[i].end_heap = 0;
 			task_table[i].lwip_err = 0;
 			task_table[i].start_tick = get_clock_tick();
 
+			// add task in the runqueue
+			spinlock_lock(&runqueues[core_id].lock);
+			runqueues[core_id].prio_bitmap |= (1 << prio);
+			runqueues[core_id].nr_tasks++;
+			if (!runqueues[core_id].queue[prio-1].first) {
+				task_table[i].prev = NULL;
+				runqueues[core_id].queue[prio-1].first = task_table+i;
+				runqueues[core_id].queue[prio-1].last = task_table+i;
+				task_table[i].next = NULL;
+			} else {
+				task_table[i].prev = runqueues[core_id].queue[prio-1].last;
+				runqueues[core_id].queue[prio-1].last->next = task_table+i;
+				runqueues[core_id].queue[prio-1].last = task_table+i;
+				task_table[i].next = NULL;
+			}
+			spinlock_unlock(&runqueues[core_id].lock);
 			break;
 		}
 	}
@@ -258,7 +307,7 @@ create_task_out:
 int sys_fork(void)
 {
 	int ret = -ENOMEM;
-	unsigned int i;
+	unsigned int i, core_id;
 	task_t* parent_task = per_core(current_task);
 	vma_t** child;
 	vma_t* parent;
@@ -267,6 +316,8 @@ int sys_fork(void)
 	spinlock_lock(&parent_task->vma_lock);
 	spinlock_irqsave_lock(&table_lock);
 
+	core_id = CORE_ID;
+
 	for(i=0; i<MAX_TASKS; i++) {
 		if (task_table[i].status == TASK_INVALID) {
 			atomic_int32_set(&task_table[i].user_usage, 0);
@@ -304,12 +355,31 @@ int sys_fork(void)
 			mailbox_wait_msg_init(&task_table[i].inbox);
 			memset(task_table[i].outbox, 0x00, sizeof(mailbox_wait_msg_t*)*MAX_TASKS);
 			task_table[i].outbox[parent_task->id] = &parent_task->inbox;
-			task_table[i].flags = parent_task->flags & ~TASK_SWITCH_IN_PROGRESS;
+			task_table[i].flags = parent_task->flags;
 			memcpy(&(task_table[i].fpu), &(parent_task->fpu), sizeof(union fpu_state));
 			task_table[i].start_tick = get_clock_tick();
 			task_table[i].start_heap = 0;
 			task_table[i].end_heap = 0;
 			task_table[i].lwip_err = 0;
+			task_table[i].prio = parent_task->prio;
+			task_table[i].last_core = parent_task->last_core;
 
+			// add task in the runqueue
+			spinlock_lock(&runqueues[core_id].lock);
+			runqueues[core_id].prio_bitmap |= (1 << parent_task->prio);
+			runqueues[core_id].nr_tasks++;
+			if (!runqueues[core_id].queue[parent_task->prio-1].first) {
+				task_table[i].prev = NULL;
+				runqueues[core_id].queue[parent_task->prio-1].first = task_table+i;
+				runqueues[core_id].queue[parent_task->prio-1].last = task_table+i;
+				task_table[i].next = NULL;
+			} else {
+				task_table[i].prev = runqueues[core_id].queue[parent_task->prio-1].last;
+				runqueues[core_id].queue[parent_task->prio-1].last->next = task_table+i;
+				runqueues[core_id].queue[parent_task->prio-1].last = task_table+i;
+				task_table[i].next = NULL;
+			}
+			spinlock_unlock(&runqueues[core_id].lock);
 
 			ret = arch_fork(task_table+i);
@@ -318,13 +388,7 @@ int sys_fork(void)
 				// Leave the function without releasing the locks
 				// because the locks are already released
 				// by the parent task!
-#if MAX_CORES > 1
-				task_t* old = per_core(old_task);
-
-				if (old)
-					old->flags &= ~TASK_SWITCH_IN_PROGRESS;
-#endif
-				irq_enable();
+				finish_task_switch();
 				return 0;
 			}
@@ -358,13 +422,8 @@ static int STDCALL kernel_entry(void* args)
 {
 	int ret;
 	kernel_args_t* kernel_args = (kernel_args_t*) args;
-#if MAX_CORES > 1
-	task_t* old = per_core(old_task);
-
-	if (old)
-		old->flags &= ~TASK_SWITCH_IN_PROGRESS;
-#endif
-	irq_enable();
+	finish_task_switch();
 
 	if (BUILTIN_EXPECT(!kernel_args, 0))
 		return -EINVAL;
@@ -376,7 +435,7 @@ static int STDCALL kernel_entry(void* args)
 	return ret;
 }
 
-int create_kernel_task(tid_t* id, entry_point_t ep, void* args)
+int create_kernel_task(tid_t* id, entry_point_t ep, void* args, uint8_t prio)
 {
 	kernel_args_t* kernel_args;
 
@@ -387,7 +446,10 @@ int create_kernel_task(tid_t* id, entry_point_t ep, void* args)
 	kernel_args->func = ep;
 	kernel_args->args = args;
 
-	return create_task(id, kernel_entry, kernel_args);
+	if (prio > MAX_PRIO)
+		prio = NORMAL_PRIO;
+
+	return create_task(id, kernel_entry, kernel_args, prio);
 }
 
 #define MAX_ARGS	(PAGE_SIZE - 2*sizeof(int) - sizeof(vfs_node_t*))
@@ -616,13 +678,8 @@ invalid:
 static int STDCALL user_entry(void* arg)
 {
 	int ret;
-#if MAX_CORES > 1
-	task_t* old = per_core(old_task);
-
-	if (old)
-		old->flags &= ~TASK_SWITCH_IN_PROGRESS;
-#endif
-	irq_enable();
+	finish_task_switch();
 
 	if (BUILTIN_EXPECT(!arg, 0))
 		return -EINVAL;
@@ -680,7 +737,7 @@ int create_user_task(tid_t* id, const char* fname, char** argv)
 		while ((*dest++ = *src++) != 0);
 	}
 
-	return create_task(id, user_entry, load_args);
+	return create_task(id, user_entry, load_args, NORMAL_PRIO);
 }
 
 /** @brief Used by the execve-Systemcall */
@@ -791,54 +848,320 @@ tid_t wait(int32_t* result)
  */
 int wakeup_task(tid_t id)
 {
+	task_t* task;
+	uint32_t core_id, prio;
 	uint32_t flags;
 	int ret = -EINVAL;
 
-	spinlock_irqsave_lock(&table_lock);
+	flags = irq_nested_disable();
 
+	task = task_table + id;
+	prio = task->prio;
+	core_id = task->last_core;
 
 	if (task_table[id].status == TASK_BLOCKED) {
 		task_table[id].status = TASK_READY;
 		ret = 0;
 
+		spinlock_lock(&runqueues[core_id].lock);
+		// increase the number of ready tasks
+		runqueues[core_id].nr_tasks++;
+
+		// add task to the runqueue
+		if (!runqueues[core_id].queue[prio-1].last) {
+			runqueues[core_id].queue[prio-1].last = runqueues[core_id].queue[prio-1].first = task;
+			task->next = task->prev = NULL;
+			runqueues[core_id].prio_bitmap |= (1 << prio);
+		} else {
+			task->prev = runqueues[core_id].queue[prio-1].last;
+			task->next = NULL;
+			runqueues[core_id].queue[prio-1].last->next = task;
+			runqueues[core_id].queue[prio-1].last = task;
+		}
+		spinlock_unlock(&runqueues[core_id].lock);
 	}
 
-	spinlock_irqsave_unlock(&table_lock);
+	irq_nested_enable(flags);
 
 	return ret;
 }
 
-/*
- * we use this struct to guarantee that the id
- * has its own cache line
- */
-typedef struct {
-	uint32_t id __attribute__ ((aligned (CACHE_LINE)));
-	uint8_t gap[CACHE_LINE-sizeof(uint32_t)];
-} last_id_t;
-/** @brief _The_ scheduler procedure
+/** @brief Block current task
  *
- * Manages scheduling - right now this is just a round robin scheduler.
+ * The current task's status will be changed to TASK_BLOCKED.
  *
+ * @return
+ * - 0 on success
+ * - -EINVAL (-22) on failure
  */
-void scheduler(void)
+int block_current_task(void)
 {
 	task_t* curr_task;
+	tid_t id;
+	uint32_t core_id, prio;
+	uint32_t flags;
+	int ret = -EINVAL;
 
+	flags = irq_nested_disable();
 
+	curr_task = per_core(current_task);
+	id = curr_task->id;
+	prio = curr_task->prio;
+	core_id = CORE_ID;
 
+	if (task_table[id].status == TASK_RUNNING) {
+		task_table[id].status = TASK_BLOCKED;
+		ret = 0;
+
+		spinlock_lock(&runqueues[core_id].lock);
+		// reduce the number of ready tasks
+		runqueues[core_id].nr_tasks--;
+
+		// remove task from queue
+		if (task_table[id].prev)
+			task_table[id].prev->next = task_table[id].next;
+		if (task_table[id].next)
+			task_table[id].next->prev = task_table[id].prev;
+		if (runqueues[core_id].queue[prio-1].first == task_table+id)
+			runqueues[core_id].queue[prio-1].first = task_table[id].next;
+		if (runqueues[core_id].queue[prio-1].last == task_table+id) {
+			runqueues[core_id].queue[prio-1].last = task_table[id].prev;
+			if (!runqueues[core_id].queue[prio-1].last)
+				runqueues[core_id].queue[prio-1].last = runqueues[core_id].queue[prio-1].first;
+		}
+
+		// No valid task in queue => update prio_bitmap
+		if (!runqueues[core_id].queue[prio-1].first)
+			runqueues[core_id].prio_bitmap &= ~(1 << prio);
+
+		spinlock_unlock(&runqueues[core_id].lock);
+	}
+
+	irq_nested_enable(flags);
+
+	return ret;
+}
 
+int set_timer(uint64_t deadline)
+{
+	task_t* curr_task;
+	task_t* tmp;
+	uint32_t core_id, prio;
+	uint32_t flags;
+	int ret = -EINVAL;
+
+	flags = irq_nested_disable();
+
+	curr_task = per_core(current_task);
+	prio = curr_task->prio;
+	core_id = CORE_ID;
+
+	if (curr_task->status == TASK_RUNNING) {
+		curr_task->status = TASK_BLOCKED;
+		curr_task->timeout = deadline;
+		ret = 0;
+
+		spinlock_lock(&runqueues[core_id].lock);
+
+		// reduce the number of ready tasks
+		runqueues[core_id].nr_tasks--;
+
+		// remove task from queue
+		if (curr_task->prev)
+			curr_task->prev->next = curr_task->next;
+		if (curr_task->next)
+			curr_task->next->prev = curr_task->prev;
+		if (runqueues[core_id].queue[prio-1].first == curr_task)
+			runqueues[core_id].queue[prio-1].first = curr_task->next;
+		if (runqueues[core_id].queue[prio-1].last == curr_task) {
+			runqueues[core_id].queue[prio-1].last = curr_task->prev;
+			if (!runqueues[core_id].queue[prio-1].last)
+				runqueues[core_id].queue[prio-1].last = runqueues[core_id].queue[prio-1].first;
+		}
+
+		// No valid task in queue => update prio_bitmap
+		if (!runqueues[core_id].queue[prio-1].first)
+			runqueues[core_id].prio_bitmap &= ~(1 << prio);
+
+		// add task to the timer queue
+		tmp = runqueues[core_id].timers.first;
+		if (!tmp) {
+			runqueues[core_id].timers.first = runqueues[core_id].timers.last = curr_task;
+			curr_task->prev = curr_task->next = NULL;
+		} else {
+			while(tmp && (deadline >= tmp->timeout))
+				tmp = tmp->next;
+
+			if (!tmp) {
+				curr_task->next = NULL;
+				curr_task->prev = runqueues[core_id].timers.last;
+				if (runqueues[core_id].timers.last)
+					runqueues[core_id].timers.last->next = curr_task;
+				runqueues[core_id].timers.last = curr_task;
+				if (!runqueues[core_id].timers.first)
+					runqueues[core_id].timers.first = curr_task;
+			} else {
+				curr_task->prev = tmp->prev;
+				curr_task->next = tmp;
+				tmp->prev = curr_task;
+				if (curr_task->prev)
+					curr_task->prev->next = curr_task;
+				if (runqueues[core_id].timers.first == tmp)
+					runqueues[core_id].timers.first = curr_task;
+			}
+		}
+
+		spinlock_unlock(&runqueues[core_id].lock);
+	} else kprintf("Task is already blocked. No timer will be set!\n");
+
+	irq_nested_enable(flags);
+
+	return ret;
+}
 
+#define FSHIFT		11		/* nr of bits of precision (e.g. 11) */
+#define FIXED_1		(1<<FSHIFT)	/* 1.0 as fixed-point */
+#define EXP		1884		/* 1/exp(5sec/1min) as fixed-point */
+
+void update_load(void)
+{
+	uint32_t core_id = CORE_ID;
+
+	runqueues[core_id].load_counter--;
+	if (runqueues[core_id].balance_counter > 0)
+		runqueues[core_id].balance_counter--;
+	if (runqueues[core_id].load_counter < 0) {
+		runqueues[core_id].load_counter += 5*TIMER_FREQ;
+
+		spinlock_lock(&runqueues[core_id].lock);
+		runqueues[core_id].load *= EXP;
+		runqueues[core_id].load += runqueues[core_id].nr_tasks*(FIXED_1-EXP);
+		runqueues[core_id].load >>= FSHIFT;
+		spinlock_unlock(&runqueues[core_id].lock);
+
+		//kprintf("load of core %u: %u, %u\n", core_id, runqueues[core_id].load, runqueues[core_id].nr_tasks);
+	}
+}
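update_load() keeps a Linux-style exponentially weighted moving average: every five seconds it computes load = (load*EXP + nr_tasks*(FIXED_1-EXP)) >> FSHIFT, where EXP/FIXED_1 = 1/exp(5s/60s) is roughly 0.92, so the average converges toward nr_tasks with a one-minute time constant. A small host-side demonstration of the arithmetic (my example; it assumes FSHIFT = 11, the value consistent with EXP = 1884):

#include <stdint.h>
#include <stdio.h>

#define FSHIFT   11
#define FIXED_1  (1 << FSHIFT)          /* 1.0 in fixed-point (2048) */
#define EXP      1884                   /* FIXED_1/exp(5s/60s)       */

int main(void)
{
	uint32_t load = 0;
	const uint32_t nr_tasks = 100;      /* pretend 100 runnable tasks */
	int window;

	/* One update per 5 s window; after ~12 windows (one minute) the
	 * average has moved most of the way toward nr_tasks. */
	for (window = 1; window <= 24; window++) {
		load = (load * EXP + nr_tasks * (FIXED_1 - EXP)) >> FSHIFT;
		if (window % 6 == 0)
			printf("after %2d windows: load = %u\n", window, load);
	}
	return 0;
}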
 
+#if MAX_CORES > 1
+extern atomic_int32_t cpu_online;
+
+void load_balancing(void)
+{
+#if 0
+	uint32_t i, core_id = CORE_ID;
+	uint32_t prio;
+	task_t* task;
+
+	spinlock_lock(&runqueues[core_id].lock);
+	for(i=0; (i<atomic_int32_read(&cpu_online)) && (runqueues[core_id].balance_counter <= 0); i++)
+	{
+		if (i == core_id)
+			break;
+
+		spinlock_lock(&runqueues[i].lock);
+		if (runqueues[i].load > runqueues[core_id].load) {
+			kprintf("Try to steal a task from core %u (load %u) to %u (load %u)\n", i, runqueues[i].load, core_id, runqueues[core_id].load);
+			kprintf("Tasks on core %u: %u, core %u: %u\n", i, runqueues[i].nr_tasks, core_id, runqueues[core_id].nr_tasks);
+
+			prio = last_set(runqueues[i].prio_bitmap);
+			if (prio) {
+				// steal a ready task
+				task = runqueues[i].queue[prio-1].last;
+				kprintf("Try to steal a ready task %d\n", task->id);
+
+				// remove last element from queue i
+				if (task->prev)
+					task->prev->next = NULL;
+				runqueues[i].queue[prio-1].last = task->prev;
+				if (!runqueues[i].queue[prio-1].last)
+					runqueues[i].queue[prio-1].first = NULL;
+
+				// add task at the end of queue core_id
+				if (!runqueues[core_id].queue[prio-1].last) {
+					runqueues[core_id].queue[prio-1].first = runqueues[core_id].queue[prio-1].last = task;
+					task->next = task->prev = NULL;
+				} else {
+					runqueues[core_id].queue[prio-1].last->next = task;
+					task->prev = runqueues[core_id].queue[prio-1].last;
+					runqueues[core_id].queue[prio-1].last = task;
+					task->next = NULL;
+				}
+
+				// update task counters
+				runqueues[core_id].nr_tasks++;
+				runqueues[i].nr_tasks--;
+				runqueues[core_id].balance_counter = 5*TIMER_FREQ;
+			} else {
+				task_t* tmp;
+
+				// steal a blocked task
+				task = runqueues[i].timers.first;
+				if (!task)	// Oops, found no valid task to steal
+					goto no_task_found;
+
+				kprintf("Try to steal blocked task %d\n", task->id);
+
+				// remove first timer from queue i
+				if (runqueues[i].timers.first == runqueues[i].timers.last)
+					runqueues[i].timers.first = runqueues[i].timers.last = NULL;
+				else
+					runqueues[i].timers.first = runqueues[i].timers.first->next;
+
+				// add timer to queue core_id
+				tmp = runqueues[core_id].timers.first;
+				while(tmp && (task->timeout >= tmp->timeout))
+					tmp = tmp->next;
+
+				if (!tmp) {
+					task->next = NULL;
+					task->prev = runqueues[core_id].timers.last;
+					if (runqueues[core_id].timers.last)
+						runqueues[core_id].timers.last->next = task;
+					runqueues[core_id].timers.last = task;
+					if (!runqueues[core_id].timers.first)
+						runqueues[core_id].timers.first = task;
+				} else {
+					task->prev = tmp->prev;
+					task->next = tmp;
+					tmp->prev = task;
+					if (task->prev)
+						task->prev->next = task;
+					if (runqueues[core_id].timers.first == tmp)
+						runqueues[core_id].timers.first = task;
+				}
+
+				// => reschedule on the new core
+				task->last_core = CORE_ID;
+
+				// update task counters
+				runqueues[core_id].nr_tasks++;
+				runqueues[i].nr_tasks--;
+				runqueues[core_id].balance_counter = 5*TIMER_FREQ;
+			}
+		}
+no_task_found:
+		spinlock_unlock(&runqueues[i].lock);
+	}
+	spinlock_unlock(&runqueues[core_id].lock);
+#endif
+}
+#endif
 
+void scheduler(void)
+{
 	task_t* orig_task;
 	task_t* curr_task;
-	uint32_t i;
-	uint32_t new_id;
+	uint32_t core_id = CORE_ID;
+	uint32_t prio;
 	uint64_t current_tick;
-	static last_id_t last_id = { 0 };
 
-#if MAX_CORES > 1
-	spinlock_irqsave_lock(&table_lock);
-#endif
-	current_tick = get_clock_tick();
 	orig_task = curr_task = per_core(current_task);
 
 	/* increase the number of used time slices */
 	curr_task->time_slices++;
+	curr_task->last_core = core_id;
 
 	/* signals that this task could be reused */
 	if (curr_task->status == TASK_FINISHED)
 		curr_task->status = TASK_INVALID;
 
 	/* if the task is using the FPU, we need to save the FPU context */
 	if (curr_task->flags & TASK_FPU_USED) {
@@ -846,64 +1169,87 @@ void scheduler(void)
 		curr_task->flags &= ~TASK_FPU_USED;
 	}
 
-	for(i=0, new_id=(last_id.id + 1) % MAX_TASKS;
-		i<MAX_TASKS; i++, new_id=(new_id+1) % MAX_TASKS)
+	spinlock_lock(&runqueues[core_id].lock);
+
+	// check timers
+	current_tick = get_clock_tick();
+	while (runqueues[core_id].timers.first && runqueues[core_id].timers.first->timeout <= current_tick)
 	{
-		if (task_table[new_id].flags & TASK_TIMER_USED) {
-			if (task_table[new_id].status != TASK_BLOCKED)
-				task_table[new_id].flags &= ~TASK_TIMER_USED;
-			if ((task_table[new_id].status == TASK_BLOCKED) && (current_tick >= task_table[new_id].timeout)) {
-				task_table[new_id].flags &= ~TASK_TIMER_USED;
-				task_table[new_id].status = TASK_READY;
-			}
-		}
-
-		if ((task_table[new_id].status == TASK_READY) && !(task_table[new_id].flags & TASK_SWITCH_IN_PROGRESS)) {
-			if (curr_task->status == TASK_RUNNING) {
-				curr_task->status = TASK_READY;
-#if MAX_CORES > 1
-				curr_task->flags |= TASK_SWITCH_IN_PROGRESS;
-				per_core(old_task) = curr_task;
-#endif
-			}
-#if MAX_CORES > 1
-			else per_core(old_task) = NULL;
-#endif
-			task_table[new_id].status = TASK_RUNNING;
-			curr_task = per_core(current_task) = task_table+new_id;
-			last_id.id = new_id;
-
-			goto get_task_out;
-		}
-	}
+		task_t* task = runqueues[core_id].timers.first;
+
+		// remove timer from queue
+		runqueues[core_id].timers.first = runqueues[core_id].timers.first->next;
+		if (!runqueues[core_id].timers.first)
+			runqueues[core_id].timers.last = NULL;
+
+		// wakeup task
+		if (task->status == TASK_BLOCKED) {
+			task->status = TASK_READY;
+			prio = task->prio;
+
+			// increase the number of ready tasks
+			runqueues[core_id].nr_tasks++;
+
+			// add task to the runqueue
+			if (!runqueues[core_id].queue[prio-1].first) {
+				runqueues[core_id].queue[prio-1].last = runqueues[core_id].queue[prio-1].first = task;
+				task->next = task->prev = NULL;
+				runqueues[core_id].prio_bitmap |= (1 << prio);
+			} else {
+				task->prev = runqueues[core_id].queue[prio-1].last;
+				task->next = NULL;
+				runqueues[core_id].queue[prio-1].last->next = task;
+				runqueues[core_id].queue[prio-1].last = task;
+			}
+		}
+	}
 
+	runqueues[core_id].old_task = NULL; // reset old task
+	prio = last_set(runqueues[core_id].prio_bitmap); // determines the highest priority
 #if MAX_CORES > 1
-	per_core(old_task) = NULL;
+	if (!prio) {
+		load_balancing();
+		prio = last_set(runqueues[core_id].prio_bitmap); // retry...
+	}
 #endif
 
-	if ((curr_task->status == TASK_RUNNING) || (curr_task->status == TASK_IDLE))
-		goto get_task_out;
+	if (BUILTIN_EXPECT(prio > MAX_PRIO, 0)) {
+		kprintf("Invalid priority %u by bitmap 0x%x\n", prio, runqueues[core_id].prio_bitmap);
+		prio = 0;
+	}
 
 	/*
 	 * we switch to the idle task, if the current task terminates
 	 * and no other is ready
 	 */
-	new_id = CORE_ID;
-	curr_task = per_core(current_task) = task_table+CORE_ID;
+	if (!prio) {
+		if ((curr_task->status == TASK_RUNNING) || (curr_task->status == TASK_IDLE))
+			goto get_task_out;
+		curr_task = per_core(current_task) = runqueues[core_id].idle;
+	} else {
+		// Does the current task have a higher priority? => no task switch
+		if ((curr_task->prio > prio) && (curr_task->status == TASK_RUNNING))
+			goto get_task_out;
+
+		if (curr_task->status == TASK_RUNNING) {
+			curr_task->status = TASK_READY;
+			runqueues[core_id].old_task = curr_task;
+		}
+
+		curr_task = per_core(current_task) = runqueues[core_id].queue[prio-1].first;
+		curr_task->status = TASK_RUNNING;
+
+		// remove new task from queue
+		runqueues[core_id].queue[prio-1].first = curr_task->next;
+		if (!curr_task->next) {
+			runqueues[core_id].queue[prio-1].last = NULL;
+			runqueues[core_id].prio_bitmap &= ~(1 << prio);
+		}
+	}
 
 get_task_out:
-#if MAX_CORES > 1
-	spinlock_irqsave_unlock(&table_lock);
-#endif
+	spinlock_unlock(&runqueues[core_id].lock);
 
 	if (curr_task != orig_task) {
-		//kprintf("schedule from %d to %d on core %d\n", orig_task->id, curr_task->id, smp_id());
-		switch_task(new_id);
-#if MAX_CORES > 1
-		orig_task= per_core(old_task);
-		if (orig_task)
-			orig_task->flags &= ~TASK_SWITCH_IN_PROGRESS;
-#endif
+		//kprintf("schedule from %u to %u with prio %u on core %u\n",
+		//        orig_task->id, curr_task->id, (uint32_t)curr_task->prio, CORE_ID);
+		switch_task(curr_task->id);
 	}
 }
@@ -254,7 +254,7 @@ static int join_test(void* arg)
 	tid_t id, ret;
 	int result = -1234;
 
-	create_kernel_task(&id, foo, "Hello from foo2");
+	create_kernel_task(&id, foo, "Hello from foo2", HIGH_PRIO);
 
 	kprintf("Wait for child %u\n", id);
 	do {
@@ -409,14 +409,17 @@ int test_init(void)
 	// create_kernel_task(NULL,client_task,NULL);
 #endif
 
-	//create_kernel_task(NULL, foo, "Hello from foo1");
-	//create_kernel_task(NULL, join_test, NULL);
+	create_kernel_task(NULL, foo, "Hello from foo1", NORMAL_PRIO);
+	create_kernel_task(NULL, join_test, NULL, NORMAL_PRIO);
-	//create_kernel_task(NULL, producer, NULL);
-	//create_kernel_task(NULL, consumer, NULL);
-	//create_kernel_task(NULL, mail_ping, NULL);
-	create_kernel_task(NULL, svm_test, NULL);
+	//create_kernel_task(NULL, svm_test, NULL);
+	//create_kernel_task(NULL, producer, NULL, NORMAL_PRIO);
+	//create_kernel_task(NULL, consumer, NULL, NORMAL_PRIO);
+	//create_kernel_task(NULL, mail_ping, NULL, NORMAL_PRIO);
 	//create_user_task(NULL, "/bin/hello", argv);
-	//create_user_task(NULL, "/bin/tests", argv);
+	create_user_task(NULL, "/bin/tests", argv);
 	//create_user_task(NULL, "/bin/jacobi", argv);
 	//create_user_task(NULL, "/bin/server", server_argv);
@@ -85,7 +85,7 @@ sys_thread_t sys_thread_new(const char *name, lwip_thread_fn thread, void *arg,
 {
 	sys_thread_t tmp;
 
-	create_kernel_task(&tmp, thread, arg);
+	create_kernel_task(&tmp, thread, arg, prio);
 	kprintf("Created LWIP task %s with id %u\n", name, tmp);
 
 	return tmp;
@@ -104,6 +104,20 @@
  */
 #define IP_FORWARD	1
 
+/**
+ * TCPIP_THREAD_PRIO: The priority assigned to the main tcpip thread.
+ * The priority value itself is platform-dependent, but is passed to
+ * sys_thread_new() when the thread is created.
+ */
+#define TCPIP_THREAD_PRIO	HIGH_PRIO
+
+/**
+ * DEFAULT_THREAD_PRIO: The priority assigned to any other lwIP thread.
+ * The priority value itself is platform-dependent, but is passed to
+ * sys_thread_new() when the thread is created.
+ */
+#define DEFAULT_THREAD_PRIO	NORMAL_PRIO
+
 /* DEBUG options */
 #define LWIP_DEBUG	1
 #define DHCP_DEBUG	LWIP_DBG_OFF
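With the port's sys_thread_new() now forwarding a priority into create_kernel_task(), lwIP's usual thread-creation calls pick these values up. A usage sketch following lwIP's conventional sys_thread_new(name, fn, arg, stacksize, prio) signature (the thread body is invented; the stack-size argument appears unused by this port, as far as the diff shows):

#include "lwip/sys.h"

/* Illustrative lwIP thread function. */
static void echo_thread(void *arg)
{
	(void)arg;
	for (;;) {
		/* ... serve sockets ... */
	}
}

void start_net_threads(void)
{
	/* The tcpip thread itself runs at HIGH_PRIO via TCPIP_THREAD_PRIO;
	 * auxiliary threads default to NORMAL_PRIO. */
	sys_thread_new("echo", echo_thread, NULL, 0, DEFAULT_THREAD_PRIO);
}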