1
0
Fork 0
mirror of https://github.com/hermitcore/libhermit.git synced 2025-03-09 00:00:03 +01:00

Defer saving the FPU registers until another task wants to use them (lazy FPU context switching)

This commit is contained in:
Stefan Lankes 2015-07-12 16:39:27 +02:00
parent 6c16316374
commit 6ba3252012
5 changed files with 44 additions and 23 deletions

View file

@ -262,6 +262,13 @@ inline static uint32_t has_nx(void)
/// Check whether the CPU advertises the AVX2 feature bit
inline static uint32_t has_avx2(void) {
	const uint32_t avx2_bit = CPU_FEATURE_AVX2;

	return cpu_info.feature4 & avx2_bit;
}
/// clear TS bit in cr0
///
/// With CR0.TS cleared, FPU/SSE instructions no longer trap, so the
/// current task can use the FPU directly (see fpu_handler, which calls
/// this before restoring the FPU state).
static inline void clts(void)
{
	asm volatile("clts");
}
/** @brief Read out time stamp counter
*
* The rdtsc asm command puts a 64 bit time stamp value

View file

@ -79,7 +79,7 @@ typedef struct i387_fxsave_struct {
/** @brief Header of an XSAVE area
 *
 * 64-byte header: the state-component bitmap (xstate_bv), the
 * compaction bitmap (xcomp_bv), and 48 reserved bytes of padding.
 *
 * Fix: the reserved member appeared twice (stale duplicate line from a
 * botched merge/diff), which is ill-formed C; keep it exactly once.
 */
typedef struct {
	uint64_t xstate_bv;
	uint64_t xcomp_bv;
	uint64_t reserved[6];	// pads the header to 64 bytes
} xsave_header_t;
typedef struct {

View file

@ -81,7 +81,7 @@ extern void isr30(void);
extern void isr31(void);
static void fault_handler(struct state *s);
static void fpu_handler(struct state *s);
extern void fpu_handler(struct state *s);
/*
* This is a very repetitive function... it's not hard, it's
@ -172,21 +172,6 @@ void isrs_install(void)
irq_install_handler(7, fpu_handler);
}
/* Lazy-FPU trap handler: runs when a task touches the FPU while CR0.TS
 * is set (installed on vector 7 via irq_install_handler — presumably the
 * x86 device-not-available exception; confirm).  Restores the current
 * task's FPU context so execution can continue. */
static void fpu_handler(struct state *s)
{
	task_t* task = per_core(current_task);

	asm volatile ("clts"); // clear the TS flag of cr0 so FPU ops stop trapping

	if (!(task->flags & TASK_FPU_INIT)) {
		// use the FPU at the first time => Initialize FPU
		fpu_init(&task->fpu);
		task->flags |= TASK_FPU_INIT;
	}

	// reload this task's saved FPU register state and mark it in use
	restore_fpu_state(&task->fpu);
	task->flags |= TASK_FPU_USED;
}
/** @brief Exception messages
*
* This is a simple string array. It contains the message that

View file

@ -119,6 +119,8 @@ typedef struct {
task_t* idle __attribute__ ((aligned (CACHE_LINE)));
/// previous task
task_t* old_task;
/// last task, which used the FPU
tid_t fpu_owner;
/// total number of tasks in the queue
uint32_t nr_tasks;
/// indicates the used priority queues

View file

@ -49,9 +49,9 @@ static spinlock_irqsave_t table_lock = SPINLOCK_IRQSAVE_INIT;
/* Per-core ready queues.
 *
 * Fix: the diff artifact left BOTH the old and the new initializer line
 * in each branch, yielding duplicate conflicting definitions.  Keep only
 * the updated initializers, which carry the additional 0 for the new
 * fpu_owner field (tid 0 == "no FPU owner"), matching the field order of
 * readyqueues_t: idle, old_task, fpu_owner, nr_tasks, ... */
#if MAX_CORES > 1
static readyqueues_t readyqueues[MAX_CORES] = { \
	[0 ... MAX_CORES-1] = {NULL, NULL, 0, 0, 0, {[0 ... MAX_PRIO-2] = {NULL, NULL}}, {NULL, NULL}, SPINLOCK_IRQSAVE_INIT}};
#else
static readyqueues_t readyqueues[1] = {[0] = {task_table+0, NULL, 0, 0, 0, {[0 ... MAX_PRIO-2] = {NULL, NULL}}, {NULL, NULL}, SPINLOCK_IRQSAVE_INIT}};
#endif
DEFINE_PER_CORE(task_t*, current_task, task_table+0);
@ -96,6 +96,32 @@ int multitasking_init(void)
return 0;
}
/* interrupt handler to save / restore the FPU context
 *
 * Lazy FPU switching: the scheduler no longer saves FPU state on every
 * task switch; instead it records the last FPU user in
 * readyqueues[core].fpu_owner, and this handler saves that owner's state
 * only when a *different* task first touches the FPU. */
void fpu_handler(struct state *s)
{
	task_t* task = per_core(current_task);
	uint32_t core_id = CORE_ID;

	clts(); // clear the TS flag of cr0 so FPU ops stop trapping

	// fpu_owner is shared with the scheduler => access under the queue lock
	spinlock_irqsave_lock(&readyqueues[core_id].lock);

	// did another task already use the FPU? => save its FPU state first
	// (tid 0 serves as the "no owner" sentinel, so task 0 is assumed to
	// never own the FPU — TODO confirm)
	if (readyqueues[core_id].fpu_owner) {
		save_fpu_state(&task_table[readyqueues[core_id].fpu_owner].fpu);
		readyqueues[core_id].fpu_owner = 0;
	}

	spinlock_irqsave_unlock(&readyqueues[core_id].lock);

	// NOTE(review): if the current task itself is the recorded owner, its
	// state is saved and immediately restored below — looks redundant but
	// harmless; verify.
	if (!(task->flags & TASK_FPU_INIT)) {
		// use the FPU at the first time => Initialize FPU
		fpu_init(&task->fpu);
		task->flags |= TASK_FPU_INIT;
	}

	// reload this task's FPU register state and mark it in use
	restore_fpu_state(&task->fpu);
	task->flags |= TASK_FPU_USED;
}
int set_idle_task(void)
{
uint32_t i, core_id = CORE_ID;
@ -252,6 +278,7 @@ int create_task(tid_t* id, entry_point_t ep, void* arg, uint8_t prio, uint32_t c
if (id)
*id = i;
//kprintf("Create task %d with pml4 at 0x%llx\n", i, task_table[i].page_map);
ret = create_default_frame(task_table+i, ep, arg);
@ -594,19 +621,19 @@ size_t** scheduler(void)
}
get_task_out:
spinlock_irqsave_unlock(&readyqueues[core_id].lock);
if (curr_task != orig_task) {
/* if the original task is using the FPU, we need to save the FPU context */
if ((orig_task->flags & TASK_FPU_USED) && (orig_task->status == TASK_READY)) {
save_fpu_state(&(orig_task->fpu));
readyqueues[core_id].fpu_owner = orig_task->id;
orig_task->flags &= ~TASK_FPU_USED;
}
spinlock_irqsave_unlock(&readyqueues[core_id].lock);
//kprintf("schedule on core %d from %u to %u with prio %u\n", core_id, orig_task->id, curr_task->id, (uint32_t)curr_task->prio);
return (size_t**) &(orig_task->last_stack_pointer);
}
} else spinlock_irqsave_unlock(&readyqueues[core_id].lock);
return NULL;
}