
rename function per_core_set into set_per_core

Stefan Lankes 2015-07-21 18:44:13 +02:00
parent 8362e7bb1b
commit 8289b76cf9
4 changed files with 9 additions and 9 deletions


@@ -52,7 +52,7 @@ extern "C" {
 	} \
 	ptr; })
-#define per_core_set(var, value) ({ \
+#define set_per_core(var, value) ({ \
 	switch (sizeof(var)) { \
 	case 4: asm volatile ("movl %0, %%gs:(" #var ")" :: "r"(value)); \
 		break; \

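The hunk above shows only the tail of the companion per_core() read macro and the 32-bit case of the renamed writer. As a minimal sketch (not the exact HermitCore definition; the 8-byte case is an assumption extrapolated from the visible 4-byte case), the renamed macro roughly amounts to:

/* Sketch of the renamed per-core writer: the value is stored through a
 * %gs-relative move, so each core that has programmed its own GS base
 * writes to its private copy of the variable. Only the 4-byte case is
 * visible in the hunk above; the 8-byte case is assumed here. */
#define set_per_core(var, value) ({ \
	switch (sizeof(var)) { \
	case 4: asm volatile ("movl %0, %%gs:(" #var ")" :: "r"(value)); \
		break; \
	case 8: asm volatile ("movq %0, %%gs:(" #var ")" :: "r"(value)); \
		break; \
	}})

A call such as set_per_core(__core_id, apic_cpu_id()) in the next hunk then updates only the calling core's copy of __core_id.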

@@ -240,7 +240,7 @@ int cpu_detection(void) {
 	kprintf("Core %d set per_core offset to 0x%x\n", apic_cpu_id(), rdmsr(MSR_GS_BASE));
 	/* set core id to apic_cpu_id */
-	per_core_set(__core_id, apic_cpu_id());
+	set_per_core(__core_id, apic_cpu_id());
 	if (first_time && has_sse())
 		wmb = sfence;

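The kprintf() in this hunk reads the base back via rdmsr(MSR_GS_BASE), which is the other half of the mechanism: the GS-relative accessors only work after each core has programmed its own GS base. Below is a minimal, hypothetical sketch of that setup; the names gs_init() and PERCORE_OFFSET() are illustrative and not part of this commit, and a wrmsr() counterpart to the rdmsr() seen above is assumed.

/* Hypothetical sketch: point this core's GS base at its private
 * per-core block so that per_core()/set_per_core() accesses resolve
 * to core-local storage. */
static void gs_init(uint32_t core_id)
{
	/* PERCORE_OFFSET(core_id) stands for wherever this core's per-core
	 * block lives; the real layout is defined elsewhere in the kernel. */
	wrmsr(MSR_GS_BASE, PERCORE_OFFSET(core_id));
	kprintf("Core %d set per_core offset to 0x%x\n", core_id, rdmsr(MSR_GS_BASE));
}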

@@ -45,7 +45,7 @@ size_t* get_current_stack(void)
 	uint32_t core_id = CORE_ID;
 	task_t* curr_task = per_core(current_task);
-	per_core_set(kernel_stack, curr_task->stack + KERNEL_STACK_SIZE - 0x10);
+	set_per_core(kernel_stack, curr_task->stack + KERNEL_STACK_SIZE - 0x10);
 	task_state_segments[core_id].rsp0 = (size_t) curr_task->stack + KERNEL_STACK_SIZE - 0x10;
 	// use new page table


@@ -91,8 +91,8 @@ int multitasking_init(void)
 	task_table[0].prio = IDLE_PRIO;
 	task_table[0].stack = (char*) ((size_t)&boot_stack + core_id * KERNEL_STACK_SIZE);
-	per_core_set(kernel_stack, task_table[0].stack + KERNEL_STACK_SIZE - 0x10);
-	per_core_set(current_task, task_table+0);
+	set_per_core(kernel_stack, task_table[0].stack + KERNEL_STACK_SIZE - 0x10);
+	set_per_core(current_task, task_table+0);
 	task_table[0].page_map = read_cr3();
 	readyqueues[core_id].idle = task_table+0;
@@ -140,7 +140,7 @@ int set_idle_task(void)
 		task_table[i].last_core = core_id;
 		task_table[i].last_stack_pointer = NULL;
 		task_table[i].stack = (char*) ((size_t)&boot_stack + core_id * KERNEL_STACK_SIZE);
-		per_core_set(kernel_stack, task_table[i].stack + KERNEL_STACK_SIZE - 0x10);
+		set_per_core(kernel_stack, task_table[i].stack + KERNEL_STACK_SIZE - 0x10);
 		task_table[i].prio = IDLE_PRIO;
 		spinlock_init(&task_table[i].vma_lock);
 		task_table[i].vma_list = NULL;
@@ -149,7 +149,7 @@ int set_idle_task(void)
 		atomic_int32_set(&task_table[i].user_usage, 0);
 		task_table[i].page_map = read_cr3();
 		readyqueues[core_id].idle = task_table+i;
-		per_core_set(current_task, readyqueues[core_id].idle);
+		set_per_core(current_task, readyqueues[core_id].idle);
 		ret = 0;
 		break;
@@ -601,7 +601,7 @@ size_t** scheduler(void)
 		if ((curr_task->status == TASK_RUNNING) || (curr_task->status == TASK_IDLE))
 			goto get_task_out;
 		curr_task = readyqueues[core_id].idle;
-		per_core_set(current_task, curr_task);
+		set_per_core(current_task, curr_task);
 	} else {
 		// Does the current task have an higher priority? => no task switch
 		if ((curr_task->prio > prio) && (curr_task->status == TASK_RUNNING))
@@ -613,7 +613,7 @@ size_t** scheduler(void)
 		}
 		curr_task = readyqueues[core_id].queue[prio-1].first;
-		per_core_set(current_task, curr_task);
+		set_per_core(current_task, curr_task);
 		if (BUILTIN_EXPECT(curr_task->status == TASK_INVALID, 0)) {
 			kprintf("Upps!!!!!!! Got invalid task %d, orig task %d\n", curr_task->id, orig_task->id);
 		}