Merge branch 'master' into ohligs

- one more bug: block_task is missing

Conflicts:
	include/metalsvm/tasks_types.h
	kernel/main.c
	kernel/tasks.c
This commit is contained in:
Marian Ohligs 2011-08-06 17:51:48 +02:00
commit cdc6707852
22 changed files with 419 additions and 287 deletions

View file

@ -42,9 +42,9 @@ extern "C" {
// feature list 1
#define CPU_FEATURE_FPU (1 << 0)
#define CPU_FEATURE_MMX (1 << 23)
#define CPU_FEATURE_FXSR (1 << 24)
#define CPU_FEATURE_FXSR (1 << 24)
#define CPU_FEATURE_SSE (1 << 25)
#define CPU_FEATURE_SSE2 (1 << 26)
#define CPU_FEATURE_SSE2 (1 << 26)
// feature list 2
#define CPU_FEATURE_AVX (1 << 28)

View file

@ -36,6 +36,11 @@
extern "C" {
#endif
/**
* @brief Dump some scheduling statistics
*/
int dump_scheduling_statistics(void);
/** @brief Fork a task from current task
*
* @param task Pointer to the task structure to fork to

View file

@ -301,9 +301,6 @@ void smp_start(uint32_t id)
// install IDT
idt_install();
// enable additional cpu features
cpu_detection();
/* enable paging */
write_cr3((uint32_t)get_boot_pgd());
i = read_cr0();
@ -320,6 +317,9 @@ void smp_start(uint32_t id)
*/
register_task(per_core(current_task));
// enable additional cpu features
cpu_detection();
smp_main();
// idle loop
@ -338,7 +338,7 @@ static apic_mp_t* search_apic(size_t base, size_t limit) {
if (tmp->signature == MP_FLT_SIGNATURE) {
if (!((tmp->version > 4) || tmp->features[0]))
return tmp;
}
}
}
return NULL;

View file

@ -116,9 +116,8 @@ int arch_fork(task_t* task)
asm volatile ("pop %0" : "=r"(task_state_segments[id].ecx));
asm volatile ("pop %0" : "=r"(task_state_segments[id].eax));
// store current EFLAGS and set IF flag
// => the parent task will enable the interrupt handling
asm volatile ("pushf; pop %%eax; or $2,%%ah" : "=a"(task_state_segments[id].eflags));
// store the current EFLAGS
asm volatile ("pushf; pop %%eax" : "=a"(task_state_segments[id].eflags));
// This will be the entry point for the new task.
asm volatile ("call read_eip" : "=a"(task_state_segments[id].eip));
@ -146,7 +145,7 @@ int create_default_frame(task_t* task, internal_entry_point_t ep, void* arg)
task_state_segments[id].fs = ds;
task_state_segments[id].gs = ds;
task_state_segments[id].es = ds;
task_state_segments[id].eflags = 0x1202;
task_state_segments[id].eflags = 0x1002; // 0x1202;
task_state_segments[id].cr3 = (uint32_t) (virt_to_phys((size_t)task->pgd));
task_state_segments[id].eip = (uint32_t) ep;
task_state_segments[id].esp = (uint32_t) kstacks[id] + KERNEL_STACK_SIZE - sizeof(size_t);

View file

@ -23,6 +23,7 @@
#include <metalsvm/time.h>
#include <metalsvm/processor.h>
#include <metalsvm/errno.h>
#include <metalsvm/spinlock.h>
#include <asm/irq.h>
#include <asm/irqflags.h>
#include <asm/gdt.h>
@ -33,7 +34,7 @@
* This will keep track of how many ticks the system
* has been running for
*/
static volatile uint64_t timer_ticks __attribute__ ((aligned (CACHE_LINE))) = 0;
static volatile uint64_t timer_ticks = 0;
uint64_t get_clock_tick(void)
{
@ -60,40 +61,60 @@ int sys_times(struct tms* buffer, clock_t* clock)
*/
static void timer_handler(struct state *s)
{
uint32_t i;
/* Increment our 'tick counter' */
#if MAX_CORES > 1
if (smp_id() == 0)
timer_ticks++;
#else
timer_ticks++;
#endif
{
timer_ticks++;
/*
* Every TIMER_FREQ clocks (approximately 1 second), we will
* display a message on the screen
*/
/*if (timer_ticks % TIMER_FREQ == 0) {
vga_puts("One second has passed\n");
}*/
/*
* Every TIMER_FREQ clocks (approximately 1 second), we will
* display a message on the screen
*/
/*if (timer_ticks % TIMER_FREQ == 0) {
vga_puts("One second has passed\n");
}*/
}
}
/*
* This will continuously loop until the given time has
* been reached
*/
void timer_wait(unsigned int ticks)
int timer_wait(unsigned int ticks)
{
uint64_t eticks = timer_ticks + ticks;
task_t* curr_task = per_core(current_task);
while (timer_ticks < eticks) {
if (curr_task->status == TASK_IDLE)
{
/*
* This will continuously loop until the given time has
* been reached
*/
while (timer_ticks < eticks) {
check_workqueues();
// recheck break condition
if (timer_ticks >= eticks)
break;
HALT;
}
} else if (timer_ticks < eticks) {
check_workqueues();
// recheck break condition
if (timer_ticks >= eticks)
break;
if (timer_ticks < eticks) {
uint32_t flags = irq_nested_disable();
curr_task->timeout = eticks;
curr_task->flags |= TASK_TIMER_USED;
curr_task->status = TASK_BLOCKED;
irq_nested_enable(flags);
reschedule();
reschedule();
}
}
return 0;
}
#define LATCH(f) ((CLOCK_TICK_RATE + f/2) / f)

View file

@ -540,7 +540,7 @@ __inline void mmnif_lock_rx_hdr(int dest_ip)
#else
if(disable_locking) return;
mm_rx_buffer_t* hdr = (char*)mpb_start_address + ( dest_ip -1 ) * mpb_size;
sem_wait(&hdr->lock);
sem_wait(&hdr->lock, 0);
#endif
}
/* mmnif_unlock_rx_hdr(): unlock the header
@ -879,7 +879,7 @@ err_t mmnif_init(struct netif* netif)
*/
sem_init(&mmnif->com_poll,1);
sem_wait(&mmnif->com_poll);
sem_wait(&mmnif->com_poll,0);
/* inform via interrupt should be the default
*/
@ -1113,7 +1113,7 @@ static int mmnif_wait(struct netif* netif, uint32_t poll, int budget)
}
else
{
mailbox_ptr_fetch(&(mmnif->mbox), (void**) &p);
mailbox_ptr_fetch(&(mmnif->mbox), (void**) &p,0);
}
/* if there is data, pass it up to the lwip
@ -1174,7 +1174,7 @@ int mmnif_poll(void* e)
if (!no_irq)
{
sem_wait(&mmnif->com_poll);
sem_wait(&mmnif->com_poll,0);
}
/*run while driver is up*/
@ -1196,7 +1196,7 @@ int mmnif_poll(void* e)
#ifdef DEBUG_MMNIF
DEBUGPRINTF("mmnif_poll(): heuristical interrupts enabled\n");
#endif
sem_wait(&mmnif->com_poll);
sem_wait(&mmnif->com_poll,0);
mmnif->stats.pll_empty = 1;
}
}

View file

@ -276,7 +276,7 @@ static void rckemacif_input(struct netif* netif, struct pbuf* p)
#endif /* PPPOE_SUPPORT */
/* full packet send to tcpip_thread to process */
if (mynetif->input(p, mynetif) != ERR_OK) {
LWIP_DEBUGF(NETIF_DEBUG, ("rtl8139if_input: IP input error\n"));
LWIP_DEBUGF(NETIF_DEBUG, ("rckemacif_input: IP input error\n"));
pbuf_free(p);
}
break;

View file

@ -113,7 +113,8 @@ static err_t rtl8139if_output(struct netif* netif, struct pbuf* p)
static void rtl8139if_input(struct netif* netif, struct pbuf* p)
{
struct eth_hdr *ethhdr;
err_t err;
/* points to packet payload, which starts with an Ethernet header */
ethhdr = p->payload;
@ -127,8 +128,9 @@ static void rtl8139if_input(struct netif* netif, struct pbuf* p)
case ETHTYPE_PPPOE:
#endif /* PPPOE_SUPPORT */
/* full packet send to tcpip_thread to process */
if (mynetif->input(p, mynetif) != ERR_OK) {
LWIP_DEBUGF(NETIF_DEBUG, ("rtl8139if_input: IP input error\n"));
err = mynetif->input(p, mynetif);
if (err != ERR_OK) {
LWIP_DEBUGF(NETIF_DEBUG, ("rtl8139if_input: IP input error %d\n", (int32_t) err));
pbuf_free(p);
}
break;

View file

@ -42,6 +42,9 @@ int network_init(void);
/** @brief Shutdown the networking subsystem. */
int network_shutdown(void);
/** @brief Entry point of the init task */
int initd(void* arg);
#ifdef __cplusplus
}
#endif

View file

@ -24,6 +24,7 @@
#include <metalsvm/mailbox_types.h>
#include <metalsvm/tasks.h>
#include <metalsvm/semaphore.h>
#include <metalsvm/errno.h>
#ifdef __cplusplus
extern "C" {
@ -32,7 +33,7 @@ extern "C" {
#define MAILBOX(name, type) \
inline static int mailbox_##name##_init(mailbox_##name##_t* m) { \
if (BUILTIN_EXPECT(!m, 0)) \
return -1; \
return -EINVAL; \
\
memset(m->buffer, 0x00, sizeof(type)*MAILBOX_SIZE); \
m->wpos = m->rpos = 0; \
@ -46,7 +47,7 @@ extern "C" {
\
inline static int mailbox_##name##_destroy(mailbox_##name##_t* m) { \
if (BUILTIN_EXPECT(!m, 0)) \
return -1; \
return -EINVAL; \
\
sem_destroy(&m->mails); \
sem_destroy(&m->boxes); \
@ -58,9 +59,9 @@ extern "C" {
\
inline static int mailbox_##name##_post(mailbox_##name##_t* m, type mail) { \
if (BUILTIN_EXPECT(!m, 0)) \
return -1; \
return -EINVAL; \
\
sem_wait(&m->boxes); \
sem_wait(&m->boxes, 0); \
spinlock_lock(&m->wlock); \
m->buffer[m->wpos] = mail; \
m->wpos = (m->wpos+1) % MAILBOX_SIZE; \
@ -70,11 +71,29 @@ extern "C" {
return 0; \
} \
\
inline static int mailbox_##name##_fetch(mailbox_##name##_t* m, type* mail) { \
if (BUILTIN_EXPECT(!m || !mail, 0)) \
return -1; \
inline static int mailbox_##name##_trypost(mailbox_##name##_t* m, type mail) { \
if (BUILTIN_EXPECT(!m, 0)) \
return -EINVAL; \
\
sem_wait(&m->mails); \
if (sem_trywait(&m->boxes)) \
return -EBUSY; \
spinlock_lock(&m->wlock); \
m->buffer[m->wpos] = mail; \
m->wpos = (m->wpos+1) % MAILBOX_SIZE; \
spinlock_unlock(&m->wlock); \
sem_post(&m->mails); \
\
return 0; \
} \
\
inline static int mailbox_##name##_fetch(mailbox_##name##_t* m, type* mail, uint32_t ms) { \
int err; \
\
if (BUILTIN_EXPECT(!m || !mail, 0)) \
return -EINVAL; \
\
err = sem_wait(&m->mails, ms); \
if (err) return err; \
spinlock_lock(&m->rlock); \
*mail = m->buffer[m->rpos]; \
m->rpos = (m->rpos+1) % MAILBOX_SIZE; \
@ -86,10 +105,10 @@ extern "C" {
\
inline static int mailbox_##name##_tryfetch(mailbox_##name##_t* m, type* mail) { \
if (BUILTIN_EXPECT(!m || !mail, 0)) \
return -1; \
return -EINVAL; \
\
if (sem_trywait(&m->mails) != 0) \
return -1; \
return -EINVAL; \
spinlock_lock(&m->rlock); \
*mail = m->buffer[m->rpos]; \
m->rpos = (m->rpos+1) % MAILBOX_SIZE; \

View file

@ -29,6 +29,8 @@
#include <metalsvm/string.h>
#include <metalsvm/semaphore_types.h>
#include <metalsvm/spinlock.h>
#include <metalsvm/errno.h>
#include <metalsvm/time.h>
#ifdef __cplusplus
extern "C" {
@ -43,19 +45,19 @@ extern "C" {
*
* @return
* - 0 on success
* - -1 on failure
* - -EINVAL on invalid argument
*/
inline static int sem_init(sem_t* s, unsigned int v) {
unsigned int i;
if (BUILTIN_EXPECT(!s, 0))
return -1;
return -EINVAL;
s->value = v;
s->pos = 0;
for(i=0; i<MAX_TASKS; i++)
s->queue[i] = MAX_TASKS;
spinlock_init(&s->lock);
spinlock_irqsave_init(&s->lock);
return 0;
}
@ -63,42 +65,13 @@ inline static int sem_init(sem_t* s, unsigned int v) {
/** @brief Destroy semaphore
* @return
* - 0 on success
* - -1 on failure
* - -EINVAL on invalid argument
*/
inline static int sem_destroy(sem_t* s) {
if (BUILTIN_EXPECT(!s, 0))
return -1;
return -EINVAL;
spinlock_destroy(&s->lock);
return 0;
}
/** @brief Blocking wait for semaphore
*
* This will put your task to sleep
*
* @return
* - 0 on success
* - -1 on failure
*/
inline static int sem_wait(sem_t* s) {
if (BUILTIN_EXPECT(!s, 0))
return -1;
next_try:
spinlock_lock(&s->lock);
if (s->value > 0) {
s->value--;
spinlock_unlock(&s->lock);
} else {
s->queue[s->pos] = per_core(current_task)->id;
s->pos = (s->pos + 1) % MAX_TASKS;
block_task(per_core(current_task)->id);
spinlock_unlock(&s->lock);
reschedule();
goto next_try;
}
spinlock_irqsave_destroy(&s->lock);
return 0;
}
@ -109,37 +82,111 @@ next_try:
*
* @return
* - 0 on success (You got the semaphore)
* - -1 on failure (You still have to wait)
* - -EINVAL on invalid argument
* - -ECANCELED on failure (You still have to wait)
*/
inline static int sem_trywait(sem_t* s) {
int ret = -1;
int ret = -ECANCELED;
if (BUILTIN_EXPECT(!s, 0))
return -1;
return -EINVAL;
spinlock_lock(&s->lock);
spinlock_irqsave_lock(&s->lock);
if (s->value > 0) {
s->value--;
ret = 0;
}
spinlock_unlock(&s->lock);
spinlock_irqsave_unlock(&s->lock);
return ret;
}
/** @brief Blocking wait for semaphore
*
* If ms is zero, the call blocks without a time limit; otherwise the
* wait is bounded by approximately ms milliseconds.
*
* @param s Pointer to the semaphore
* @param ms Timeout in milliseconds (0 = wait forever)
* @return
* - 0 on success
* - -EINVAL on invalid argument
* - -ETIME on timer expired
*/
inline static int sem_wait(sem_t* s, uint32_t ms) {
task_t* curr_task = per_core(current_task);
if (BUILTIN_EXPECT(!s, 0))
return -EINVAL;
if (!ms) {
/* untimed path: retry until the semaphore can be decremented */
next_try1:
spinlock_irqsave_lock(&s->lock);
if (s->value > 0) {
/* resource available: take it and return */
s->value--;
spinlock_irqsave_unlock(&s->lock);
} else {
/* enqueue this task and block; sem_post will wake it later */
s->queue[s->pos] = curr_task->id;
s->pos = (s->pos + 1) % MAX_TASKS;
curr_task->status = TASK_BLOCKED;
spinlock_irqsave_unlock(&s->lock);
reschedule();
NOP2;
/* woken up: loop and try to take the semaphore again */
goto next_try1;
}
return 0;
} else {
/* timed path: split the timeout into whole timer ticks plus a
* sub-tick remainder that is handled by polling below */
uint32_t ticks = (ms * TIMER_FREQ) / 1000;
uint32_t remain = (ms * TIMER_FREQ) % 1000;
if (ticks) {
uint64_t deadline = get_clock_tick() + ticks;
next_try2:
spinlock_irqsave_lock(&s->lock);
if (s->value > 0) {
s->value--;
spinlock_irqsave_unlock(&s->lock);
return 0;
} else {
/* deadline already passed: fall through to the polling phase */
if (get_clock_tick() >= deadline) {
spinlock_irqsave_unlock(&s->lock);
goto timeout;
}
/* block with a wakeup timeout; the scheduler re-readies the
* task once current_tick reaches curr_task->timeout */
s->queue[s->pos] = curr_task->id;
s->pos = (s->pos + 1) % MAX_TASKS;
curr_task->timeout = deadline;
curr_task->flags |= TASK_TIMER_USED;
curr_task->status = TASK_BLOCKED;
spinlock_irqsave_unlock(&s->lock);
reschedule();
goto next_try2;
}
}
timeout:
/* poll in 1 ms steps for the remaining sub-tick fraction */
while (remain) {
udelay(1000);
remain--;
if (!sem_trywait(s))
return 0;
}
return -ETIME;
}
}
/** @brief Give back resource
* @return
* - 0 on success
* - -1 on failure
* - -EINVAL on invalid argument
*/
inline static int sem_post(sem_t* s) {
if (BUILTIN_EXPECT(!s, 0))
return -1;
return -EINVAL;
spinlock_lock(&s->lock);
spinlock_irqsave_lock(&s->lock);
if (s->value > 0) {
s->value++;
spinlock_unlock(&s->lock);
spinlock_irqsave_unlock(&s->lock);
} else {
unsigned int k, i;
@ -153,7 +200,7 @@ inline static int sem_post(sem_t* s) {
}
i = (i + 1) % MAX_TASKS;
}
spinlock_unlock(&s->lock);
spinlock_irqsave_unlock(&s->lock);
}
return 0;

View file

@ -41,11 +41,11 @@ typedef struct {
/// Position in queue
unsigned int pos;
/// Access lock
spinlock_t lock;
spinlock_irqsave_t lock;
} sem_t;
/// Macro for initialization of semaphore
#define SEM_INIT(v) {v, {[0 ... MAX_TASKS-1] = MAX_TASKS}, 0, SPINLOCK_INIT}
#define SEM_INIT(v) {v, {[0 ... MAX_TASKS-1] = MAX_TASKS}, 0, SPINLOCK_IRQSAVE_INIT}
#ifdef __cplusplus
}

View file

@ -100,13 +100,6 @@ void scheduler(void);
*/
int wakeup_task(tid_t);
/** @brief Change a task's status to TASK_BLOCKED
* @return
* - 0 on success
* - -EINVAL (-22) on failure
*/
int block_task(tid_t);
/** @brief Abort current task */
void NORETURN abort(void);

View file

@ -42,15 +42,17 @@ extern "C" {
#endif
#define TASK_INVALID 0
#define TASK_READY 1
#define TASK_READY 1
#define TASK_RUNNING 2
#define TASK_BLOCKED 3
#define TASK_FINISHED 4
#define TASK_IDLE 5
#define TASK_IDLE 5
#define TASK_DEFAULT_FLAGS 0
#define TASK_FPU_INIT (1 << 0)
#define TASK_FPU_USED (1 << 1)
#define TASK_TIMER_USED (1 << 2)
#define TASK_SWITCH_IN_PROGRESS (1 << 3)
typedef int (*entry_point_t)(void*);
typedef int (STDCALL *internal_entry_point_t)(void*);
@ -62,8 +64,14 @@ typedef struct task {
tid_t id;
/// Task status (INVALID, READY, RUNNING, ...)
uint32_t status;
/// Additional status flags. For instance, to signalize the using of the FPU
uint32_t flags;
/// Number of used time slices
uint32_t time_slices;
/// timeout for a blocked task
uint64_t timeout;
/// Usage in number of pages
atomic_int32_t user_usage;
atomic_int32_t user_usage;
/// Avoids concurrent access to the page directory
spinlock_t pgd_lock;
/// pointer to the page directory
@ -74,8 +82,6 @@ typedef struct task {
vma_t* vma_list;
/// Filedescriptor table
fildes_t fildes_table[NR_OPEN];
/// Additional status flags. For instance, to signalize the using of the FPU
uint32_t flags;
/// starting time/tick of the task
uint64_t start_tick;
/// Start address of the heap
@ -88,7 +94,7 @@ typedef struct task {
mailbox_wait_msg_t* outbox[MAX_TASKS];
/// FPU state
union fpu_state fpu;
} __attribute__((packed)) task_t;
} task_t;
#ifdef __cplusplus
}

View file

@ -56,13 +56,13 @@ int sys_times(struct tms*, clock_t* clock);
*/
int timer_init(void);
/** @brief Blocking wait function
*
* This function does no busy-wait.
/** @brief Initialized a timer
*
* @param ticks Amount of ticks to wait
* @return
* - 0 on success
*/
void timer_wait(unsigned int ticks);
int timer_wait(unsigned int ticks);
/** @brief Returns the current number of ticks.
* @return Current number of ticks

View file

@ -25,6 +25,7 @@
#include <metalsvm/tasks.h>
#include <metalsvm/errno.h>
#include <metalsvm/init.h>
#include <metalsvm/fs.h>
#ifdef CONFIG_LWIP
#include <lwip/init.h>
#include <lwip/sys.h>
@ -47,6 +48,7 @@
void echo_init(void);
void ping_init(void);
int test_init(void);
/*
* Note that linker symbols are not variables, they have no memory allocated for
@ -177,4 +179,54 @@ int network_shutdown(void)
return 0;
}
/* Recursively print the directory tree below 'node'; 'depth' controls the
* indentation level. For regular files the first 8 bytes of content are
* read and printed as well. Skips the "." and ".." entries when recursing.
* NOTE(review): the kmalloc result is not checked for NULL — confirm that
* allocation failure is acceptable to ignore here. */
static void list_fs(vfs_node_t* node, uint32_t depth)
{
int j, i = 0;
dirent_t* dirent = NULL;
/* temporary file descriptor reused for every file we peek into */
fildes_t* file = kmalloc(sizeof(fildes_t));
file->offset = 0;
file->flags = 0;
while ((dirent = readdir_fs(node, i)) != 0) {
/* indent according to the current tree depth */
for(j=0; j<depth; j++)
kputs("  ");
kprintf("%s\n", dirent->name);
/* do not recurse into the self/parent links */
if (strcmp(dirent->name, ".") && strcmp(dirent->name, "..")) {
vfs_node_t *new_node = finddir_fs(node, dirent->name);
if (new_node) {
if (new_node->type == FS_FILE) {
/* show a short preview of the file's content */
char buff[16] = {[0 ... 15] = 0x00};
file->node = new_node;
file->offset = 0;
file->flags = 0;
read_fs(file, (uint8_t*)buff, 8);
for(j=0; j<depth+1; j++)
kputs("  ");
kprintf("content: %s\n", buff);
} else list_fs(new_node, depth+1);
}
}
i++;
}
kfree(file, sizeof(fildes_t));
}
/* Dump the whole file system tree, starting at the root node. */
static void list_root(void) {
kprintf("List of the file system:\n/\n");
list_fs(fs_root, 1);
}
/* Entry point of the init task: bring up the network subsystem, dump the
* file system tree, and start the test tasks. 'arg' is unused. Always
* returns 0. */
int initd(void* arg)
{
network_init();
list_root();
test_init();
return 0;
}

View file

@ -24,9 +24,9 @@
#include <metalsvm/mmu.h>
#include <metalsvm/tasks.h>
#include <metalsvm/processor.h>
#include <metalsvm/fs.h>
#include <metalsvm/errno.h>
#include <metalsvm/init.h>
#include <metalsvm/fs.h>
#include <asm/irq.h>
#include <asm/irqflags.h>
#include <asm/kb.h>
@ -34,8 +34,6 @@
#include <asm/icc.h>
#endif
extern int test_init(void);
/*
* Note that linker symbols are not variables, they have no memory allocated for
* maintaining a value, rather their address is their value.
@ -45,48 +43,6 @@ extern const void kernel_end;
extern char __BUILD_DATE;
extern char __BUILD_TIME;
static void list_fs(vfs_node_t* node, uint32_t depth)
{
int j, i = 0;
dirent_t* dirent = NULL;
fildes_t* file = kmalloc(sizeof(fildes_t));
file->offset = 0;
file->flags = 0;
while ((dirent = readdir_fs(node, i)) != 0) {
for(j=0; j<depth; j++)
kputs(" ");
kprintf("%s\n", dirent->name);
if (strcmp(dirent->name, ".") && strcmp(dirent->name, "..")) {
vfs_node_t *new_node = finddir_fs(node, dirent->name);
if (new_node) {
if (new_node->type == FS_FILE) {
char buff[16] = {[0 ... 15] = 0x00};
file->node = new_node;
file->offset = 0;
file->flags = 0;
read_fs(file, (uint8_t*)buff, 8);
for(j=0; j<depth+1; j++)
kputs(" ");
kprintf("content: %s\n", buff);
} else list_fs(new_node, depth+1);
}
}
i++;
}
kfree(file, sizeof(fildes_t));
}
static void list_root(void) {
kprintf("List of the file system:\n/\n");
list_fs(fs_root, 1);
}
#if MAX_CORES > 1
// idle loop of the application processors
int smp_main(void)
@ -126,7 +82,6 @@ int main(void)
kprintf("Kernel starts at %p and ends at %p\n", &kernel_start, &kernel_end);
system_calibration();
network_init();
kprintf("Processor frequency: %u MHz\n", get_cpu_frequency());
kprintf("Total memory: %u MBytes\n", atomic_int32_read(&total_pages)/((1024*1024)/PAGE_SIZE));
@ -134,9 +89,8 @@ int main(void)
kprintf("Current available memory: %u MBytes\n", atomic_int32_read(&total_available_pages)/((1024*1024)/PAGE_SIZE));
sleep(5);
list_root();
test_init();
per_core(current_task)->status = TASK_IDLE;
create_kernel_task(NULL, initd, NULL);
per_core(current_task)->time_slices = 0; // reset the number of time slices
reschedule();
while(1) {

View file

@ -47,9 +47,8 @@
* A task's id will be its position in this array.
*/
static task_t task_table[MAX_TASKS] = { \
[0] = {0, TASK_RUNNING, ATOMIC_INIT(0), SPINLOCK_INIT, NULL, SPINLOCK_INIT, NULL, FS_INIT, 0, 0, 0, 0}, \
[1 ... MAX_TASKS-1] = {0, TASK_INVALID, ATOMIC_INIT(0), SPINLOCK_INIT, NULL, SPINLOCK_INIT, NULL, FS_INIT, 0, 0, 0, 0}};
[0] = {0, TASK_IDLE, 0, 0, 0, ATOMIC_INIT(0), SPINLOCK_INIT, NULL, SPINLOCK_INIT, NULL, FS_INIT, 0, 0, 0}, \
[1 ... MAX_TASKS-1] = {0, TASK_INVALID, 0, 0, 0, ATOMIC_INIT(0), SPINLOCK_INIT, NULL, SPINLOCK_INIT, NULL, FS_INIT, 0, 0, 0}};
static spinlock_irqsave_t table_lock = SPINLOCK_IRQSAVE_INIT;
DEFINE_PER_CORE(task_t*, current_task, task_table+0);
@ -64,16 +63,36 @@ task_t* get_current_task(void) {
return per_core(current_task);
}
int multitasking_init(void) {
if (BUILTIN_EXPECT(task_table[0].status == TASK_RUNNING, 1)) {
mailbox_wait_msg_init(&task_table[0].inbox);
memset(task_table[0].outbox, 0x00, sizeof(mailbox_wait_msg_t*)*MAX_TASKS);
task_table[0].pgd = get_boot_pgd();
task_table[0].flags = TASK_DEFAULT_FLAGS;
return 0;
int dump_scheduling_statistics(void)
{
uint32_t i;
uint32_t id = 0;
kprintf("Scheduling statistics:\n");
kprintf("======================\n");
kprintf("total ticks:\t%llu\n", get_clock_tick());
for(i=0; i<MAX_CORES; i++) {
if (task_table[i].status == TASK_IDLE) {
kprintf("core %d :\t%u idle slices\n", id, task_table[i].time_slices);
id++;
}
}
return -ENOMEM;
return 0;
}
/* Initialize the tasking subsystem. Verifies that task 0 (the boot task)
* is in the TASK_IDLE state, then sets up its inbox/outbox mailboxes,
* boot page directory and default flags.
* Returns 0 on success, -ENOMEM if task 0 is not an idle task. */
int multitasking_init(void) {
if (BUILTIN_EXPECT(task_table[0].status != TASK_IDLE, 0)) {
kputs("Task 0 is not an idle task\n");
return -ENOMEM;
}
mailbox_wait_msg_init(&task_table[0].inbox);
memset(task_table[0].outbox, 0x00, sizeof(mailbox_wait_msg_t*)*MAX_TASKS);
task_table[0].pgd = get_boot_pgd();
task_table[0].flags = TASK_DEFAULT_FLAGS;
return 0;
}
size_t get_idle_task(uint32_t id)
@ -84,11 +103,12 @@ size_t get_idle_task(uint32_t id)
task_table[id].id = id;
task_table[id].status = TASK_IDLE;
task_table[id].flags = TASK_DEFAULT_FLAGS;
task_table[id].time_slices = 0;
atomic_int32_set(&task_table[id].user_usage, 0);
mailbox_wait_msg_init(&task_table[id].inbox);
memset(task_table[id].outbox, 0x00, sizeof(mailbox_wait_msg_t*)*MAX_TASKS);
task_table[id].pgd = get_boot_pgd();
task_table[id].flags = TASK_DEFAULT_FLAGS;
current_task[id].var = task_table+id;
return get_stack(id);
@ -174,17 +194,6 @@ void NORETURN abort(void) {
do_exit(-1);
}
/*
* @brief: if the task gets the first time slice,
* the table_lock is hold and have to be released.
*/
inline static void start_first_time_slice(void)
{
#if MAX_CORES > 1
spinlock_irqsave_unlock(&table_lock);
#endif
}
/** @brief Create a task with a specific entry point
*
* @param id Pointer to a tid_t struct were the id shall be set
@ -218,6 +227,9 @@ static int create_task(tid_t* id, internal_entry_point_t ep, void* arg)
}
task_table[i].id = i;
task_table[i].status = TASK_READY;
task_table[i].flags = TASK_DEFAULT_FLAGS;
task_table[i].time_slices = 0;
spinlock_init(&task_table[i].vma_lock);
task_table[i].vma_list = NULL;
mailbox_wait_msg_init(&task_table[i].inbox);
@ -232,11 +244,9 @@ static int create_task(tid_t* id, internal_entry_point_t ep, void* arg)
ret = create_default_frame(task_table+i, ep, arg);
task_table[i].flags = TASK_DEFAULT_FLAGS;
task_table[i].start_heap = 0;
task_table[i].end_heap = 0;
task_table[i].start_tick = get_clock_tick();
task_table[i].status = TASK_READY;
break;
}
}
@ -300,7 +310,7 @@ int sys_fork(void)
mailbox_wait_msg_init(&task_table[i].inbox);
memset(task_table[i].outbox, 0x00, sizeof(mailbox_wait_msg_t*)*MAX_TASKS);
task_table[i].outbox[parent_task->id] = &parent_task->inbox;
task_table[i].flags = parent_task->flags;
task_table[i].flags = parent_task->flags & ~TASK_SWITCH_IN_PROGRESS;
memcpy(&(task_table[i].fpu), &(parent_task->fpu), sizeof(union fpu_state));
task_table[i].start_tick = get_clock_tick();
task_table[i].start_heap = 0;
@ -313,7 +323,13 @@ int sys_fork(void)
// Leave the function without releasing the locks
// because the locks are already released
// by the parent task!
start_first_time_slice();
#if MAX_CORES > 1
task_t* old = per_core(old_task);
if (old)
old->flags &= ~TASK_SWITCH_IN_PROGRESS;
#endif
irq_enable();
return 0;
}
@ -347,8 +363,13 @@ static int STDCALL kernel_entry(void* args)
{
int ret;
kernel_args_t* kernel_args = (kernel_args_t*) args;
#if MAX_CORES > 1
task_t* old = per_core(old_task);
start_first_time_slice();
if (old)
old->flags &= ~TASK_SWITCH_IN_PROGRESS;
#endif
irq_enable();
if (BUILTIN_EXPECT(!kernel_args, 0))
return -EINVAL;
@ -597,8 +618,13 @@ invalid:
static int STDCALL user_entry(void* arg)
{
int ret;
#if MAX_CORES > 1
task_t* old = per_core(old_task);
start_first_time_slice();
if (old)
old->flags &= ~TASK_SWITCH_IN_PROGRESS;
#endif
irq_enable();
if (BUILTIN_EXPECT(!arg, 0))
return -EINVAL;
@ -751,7 +777,7 @@ tid_t wait(int32_t* result)
if (BUILTIN_EXPECT(curr_task->status == TASK_IDLE, 0))
return -EINVAL;
mailbox_wait_msg_fetch(&curr_task->inbox, &tmp);
mailbox_wait_msg_fetch(&curr_task->inbox, &tmp, 0);
if (result)
*result = tmp.result;
@ -769,12 +795,9 @@ int wakeup_task(tid_t id)
{
int ret = -EINVAL;
/* avoid nested locking */
spinlock_irqsave_lock(&table_lock);
if (task_table[id].status != TASK_BLOCKED) {
kprintf("Task %d is not blocked!\n", id);
} else {
if (task_table[id].status == TASK_BLOCKED) {
task_table[id].status = TASK_READY;
ret = 0;
}
@ -784,28 +807,6 @@ int wakeup_task(tid_t id)
return ret;
}
/** @brief Block a running or ready task.
* @param id The task's tid_t structure
* @return
* - 0 on success
* - -EINVAL (-22) on failure
*/
int block_task(tid_t id)
{
int ret = -EINVAL;
spinlock_irqsave_lock(&table_lock);
if ((task_table[id].status == TASK_RUNNING) || (task_table[id].status == TASK_READY)) {
task_table[id].status = TASK_BLOCKED;
ret = 0;
} else kprintf("Unable to block task %d!\n", id);
spinlock_irqsave_unlock(&table_lock);
return ret;
}
/*
* we use this struct to guarantee that the id
* has its own cache line
@ -825,14 +826,18 @@ void scheduler(void)
task_t* curr_task;
uint32_t i;
uint32_t new_id;
uint64_t current_tick;
static last_id_t last_id = { 0 };
#if MAX_CORES > 1
spinlock_irqsave_lock(&table_lock);
#endif
current_tick = get_clock_tick();
orig_task = curr_task = per_core(current_task);
/* increase the number of used time slices */
curr_task->time_slices++;
/* signalizes that this task could be reused */
if (curr_task->status == TASK_FINISHED)
curr_task->status = TASK_INVALID;
@ -843,12 +848,29 @@ void scheduler(void)
curr_task->flags &= ~TASK_FPU_USED;
}
for(i=1, new_id=(last_id.id + 1) % MAX_TASKS;
for(i=0, new_id=(last_id.id + 1) % MAX_TASKS;
i<MAX_TASKS; i++, new_id=(new_id+1) % MAX_TASKS)
{
if (task_table[new_id].status == TASK_READY) {
if (curr_task->status == TASK_RUNNING)
if (task_table[new_id].flags & TASK_TIMER_USED) {
if (task_table[new_id].status != TASK_BLOCKED)
task_table[new_id].flags &= ~TASK_TIMER_USED;
if ((task_table[new_id].status == TASK_BLOCKED) && (current_tick >= task_table[new_id].timeout)) {
task_table[new_id].flags &= ~TASK_TIMER_USED;
task_table[new_id].status = TASK_READY;
}
}
if ((task_table[new_id].status == TASK_READY) && !(task_table[new_id].flags & TASK_SWITCH_IN_PROGRESS)) {
if (curr_task->status == TASK_RUNNING) {
curr_task->status = TASK_READY;
#if MAX_CORES > 1
curr_task->flags |= TASK_SWITCH_IN_PROGRESS;
per_core(old_task) = curr_task;
#endif
}
#if MAX_CORES > 1
else per_core(old_task) = NULL;
#endif
task_table[new_id].status = TASK_RUNNING;
curr_task = per_core(current_task) = task_table+new_id;
last_id.id = new_id;
@ -857,6 +879,10 @@ void scheduler(void)
}
}
#if MAX_CORES > 1
per_core(old_task) = NULL;
#endif
if ((curr_task->status == TASK_RUNNING) || (curr_task->status == TASK_IDLE))
goto get_task_out;
@ -868,14 +894,19 @@ void scheduler(void)
curr_task = per_core(current_task) = task_table+CORE_ID;
get_task_out:
//kprintf("schedule %d on core %d\n", per_core(current_task)->id, smp_id());
if (curr_task != orig_task)
switch_task(new_id);
#if MAX_CORES > 1
spinlock_irqsave_unlock(&table_lock);
#endif
if (curr_task != orig_task) {
//kprintf("schedule from %d to %d on core %d\n", orig_task->id, curr_task->id, smp_id());
switch_task(new_id);
#if MAX_CORES > 1
orig_task= per_core(old_task);
if (orig_task)
orig_task->flags &= ~TASK_SWITCH_IN_PROGRESS;
#endif
}
}
void reschedule(void)

View file

@ -44,14 +44,14 @@ static int consumer(void* arg)
int i, m = 0;
for(i=0; i<5; i++) {
sem_wait(&consuming);
sem_wait(&consuming, 0);
kprintf("Consumer got %d\n", val);
val = 0;
sem_post(&producing);
}
for(i=0; i<5; i++) {
mailbox_int32_fetch(&mbox, &m);
mailbox_int32_fetch(&mbox, &m, 0);
kprintf("Got mail %d\n", m);
}
@ -64,7 +64,7 @@ static int producer(void* arg)
int mail[5] = {1, 2, 3, 4, 5};
for(i=0; i<5; i++) {
sem_wait(&producing);
sem_wait(&producing, 0);
kprintf("Produce value: current val %d\n", val);
val = 42;
sem_post(&consuming);
@ -121,6 +121,8 @@ static int join_test(void* arg)
kprintf("Child %u finished: result = %d\n", id, result);
dump_scheduling_statistics();
return 0;
}

View file

@ -32,18 +32,22 @@
#define FALSE 0
#endif
#if SYS_LIGHTWEIGHT_PROT
#if MAX_CORES > 1
static spinlock_irqsave_t lwprot_lock;
#endif
#endif
/** Returns the current time in milliseconds,
* may be the same as sys_jiffies or at least based on it. */
u32_t
sys_now(void)
u32_t sys_now(void)
{
return (get_clock_tick() / TIMER_FREQ) * 1000;
return (get_clock_tick() / TIMER_FREQ) * 1000;
}
u32_t
sys_jiffies(void)
u32_t sys_jiffies(void)
{
return (get_clock_tick() / TIMER_FREQ) * 1000;
return (get_clock_tick() / TIMER_FREQ) * 1000;
}
#if !NO_SYS
@ -51,9 +55,13 @@ sys_jiffies(void)
/* sys_init(): init needed system resources
* Note: At the moment there are none
*/
void
sys_init(void)
void sys_init(void)
{
#if SYS_LIGHTWEIGHT_PROT
#if MAX_CORES > 1
spinlock_irqsave_init(&lwprot_lock);
#endif
#endif
}
/**
@ -61,28 +69,24 @@ sys_init(void)
*
* @param ms number of milliseconds to sleep
*/
void
sys_msleep(u32_t ms)
void sys_msleep(u32_t ms)
{
if (ms > 0) {
sys_sem_t delaysem;
err_t err = sys_sem_new(&delaysem, 0);
if (err == ERR_OK) {
sys_arch_sem_wait(&delaysem, ms);
sys_sem_free(&delaysem);
}
}
if (ms * TIMER_FREQ / 1000 > 0)
timer_wait(ms * TIMER_FREQ / 1000);
else if (ms > 0)
udelay(ms * 1000);
}
/* sys_thread_new(): Spawns a new thread with given attributes as supported
* Note: In MetalSVM this is realized as kernel tasks
*/
sys_thread_t sys_thread_new(const char *name, lwip_thread_fn thread, void *arg, int stacksize, int prio)
sys_thread_t sys_thread_new(const char *name, lwip_thread_fn thread, void *arg,
int stacksize, int prio)
{
tid_t tmp;
sys_thread_t tmp;
kprintf("Create LWIP task %s\n", name);
create_kernel_task(&tmp,thread,arg);
create_kernel_task(&tmp, thread, arg);
kprintf("Created LWIP task %s with id %u\n", name, tmp);
return tmp;
}
@ -108,10 +112,10 @@ int sys_sem_valid(sys_sem_t* sem)
/* sys_sem_new(): creates a new semaphre with given count.
* This semaphore becomes valid
*/
err_t sys_sem_new(sys_sem_t* sem,u8_t count)
err_t sys_sem_new(sys_sem_t* sem, u8_t count)
{
sem->valid = TRUE;
return sem_init(&sem->sem,count);
return sem_init(&sem->sem, count);
}
/* sys_sem_set_invalid(): this semapohore becomes invalid
@ -137,16 +141,11 @@ void sys_sem_signal(sys_sem_t* sem)
u32_t sys_arch_sem_wait(sys_sem_t *sem, u32_t timeout)
{
int err;
if (!timeout)
return sem_wait(&sem->sem);
while (timeout)
{
err = sem_trywait(&sem->sem);
if (err != -1)
return err;
udelay(1000);
timeout--;
}
err = sem_wait(&sem->sem, timeout);
if (!err)
return 0;
return SYS_ARCH_TIMEOUT;
}
@ -164,16 +163,11 @@ int sys_mbox_valid(sys_mbox_t * mbox)
*/
u32_t sys_arch_mbox_fetch(sys_mbox_t * mbox, void **msg, u32_t timeout)
{
if (!timeout)
return mailbox_ptr_fetch(&mbox->mailbox,msg);
int err;
while(timeout)
{
if (!mailbox_ptr_tryfetch(&mbox->mailbox,msg))
return 0;
udelay(1000);
timeout--;
}
err = mailbox_ptr_fetch(&mbox->mailbox, msg, timeout);
if (!err)
return 0;
return SYS_ARCH_TIMEOUT;
}
@ -192,13 +186,13 @@ void sys_mbox_free(sys_mbox_t* mbox)
*/
u32_t sys_arch_mbox_tryfetch(sys_mbox_t* mbox, void** msg)
{
return mailbox_ptr_tryfetch(&mbox->mailbox,msg);
mailbox_ptr_tryfetch(&mbox->mailbox, msg);
}
/* sys_mbox_new(): create a new mailbox with a minimum size of "size"
*
*/
err_t sys_mbox_new(sys_mbox_t* mbox,int size)
err_t sys_mbox_new(sys_mbox_t* mbox, int size)
{
mbox->valid = TRUE;
return mailbox_ptr_init(&mbox->mailbox);
@ -218,15 +212,19 @@ void sys_mbox_set_invalid(sys_mbox_t* mbox)
*/
err_t sys_mbox_trypost(sys_mbox_t *mbox, void *msg)
{
return mailbox_ptr_post(&mbox->mailbox,msg);
int err;
err = mailbox_ptr_trypost(&mbox->mailbox, msg);
return err;
}
/* sys_mbox_post(): post new data to the mailbox
*
*/
void sys_mbox_post(sys_mbox_t* mbox,void* msg)
void sys_mbox_post(sys_mbox_t* mbox, void* msg)
{
mailbox_ptr_post(&mbox->mailbox,msg);
mailbox_ptr_post(&mbox->mailbox, msg);
}
/* sys_mutex_lock(): lock the given mutex
@ -235,7 +233,7 @@ void sys_mbox_post(sys_mbox_t* mbox,void* msg)
*/
void sys_mutex_lock(sys_mutex_t* mutex)
{
sem_wait(mutex);
sem_wait(mutex, 0);
}
/* sys_mutex_unlock(): unlock the given mutex
@ -251,23 +249,22 @@ void sys_mutex_unlock(sys_mutex_t* mutex)
*/
err_t sys_mutex_new(sys_mutex_t * mutex)
{
sem_init(mutex,1);
sem_init(mutex, 1);
return 0;
}
#if SYS_LIGHTWEIGHT_PROT
#if MAX_CORES > 1
static spinlock_irqsave_t lwprot_lock = SPINLOCK_IRQSAVE_INIT;
sys_prot_t sys_arch_protect(void)
{
spinlock_irqsave_lock(&lwprot_lock);
return 0;
spinlock_irqsave_lock(&lwprot_lock);
return 0;
}
void sys_arch_unprotect(sys_prot_t pval)
{
spinlock_irqsave_unlock(&lwprot_lock);
LWIP_UNUSED_ARG(pval);
spinlock_irqsave_unlock(&lwprot_lock);
}
#endif
#endif

View file

@ -21,7 +21,7 @@ typedef struct
int valid;
} sys_mbox_t;
typedef tid_t* sys_thread_t;
typedef tid_t sys_thread_t;
#if SYS_LIGHTWEIGHT_PROT
#if MAX_CORES > 1

View file

@ -118,4 +118,5 @@
#define TIMERS_DEBUG LWIP_DBG_OFF
#define SOCKETS_DEBUG LWIP_DBG_OFF
//#define LWIP_TCPIP_THREAD_ALIVE() kputs("TCPIP thread is alive!\n")
#endif