metalsvm/arch/x86/kernel/processor.c

/*
 * Copyright 2010 Stefan Lankes, Chair for Operating Systems,
 * RWTH Aachen University
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * This file is part of MetalSVM.
*/
#include <metalsvm/stddef.h>
#include <metalsvm/stdio.h>
#include <metalsvm/string.h>
#include <metalsvm/time.h>
#include <metalsvm/processor.h>
#include <metalsvm/tasks.h>
#ifdef CONFIG_ROCKCREEK
#include <asm/RCCE_lib.h>
#endif
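/*
 * Fallback memory barrier for CPUs without the SSE2 fence instructions:
 * a LOCK-prefixed read-modify-write on the stack serializes loads and stores.
 */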
static void default_mb(void)
{
asm volatile ("lock; addl $0,0(%%esp)" ::: "memory", "cc");
}
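/*
 * Legacy x87 state handling via FNSAVE/FRSTOR. FNSAVE writes out the full
 * x87 state and reinitializes the FPU; the following FWAIT ensures the
 * store has completed before execution continues.
 */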
static void default_save_fpu_state(union fpu_state* state)
{
asm volatile ("fnsave %0; fwait" : "=m"((*state).fsave) :: "memory");
}
static void default_restore_fpu_state(union fpu_state* state)
{
asm volatile ("frstor %0" :: "m"(state->fsave));
}
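/*
 * Set up a fresh FSAVE image matching the state after FNINIT: control word
 * 0x037f (all exceptions masked, 64-bit precision) and an all-ones tag word
 * marking every register as empty.
 */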
static void default_fpu_init(union fpu_state* fpu)
{
	i387_fsave_t *fp = &fpu->fsave;

	memset(fp, 0x00, sizeof(i387_fsave_t));
	fp->cwd = 0xffff037fu;
	fp->swd = 0xffff0000u;
	fp->twd = 0xffffffffu;
	fp->fos = 0xffff0000u;
}
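/*
 * The barrier and FPU handlers are function pointers so that cpu_detection()
 * can replace the conservative defaults with the SSE/SSE2 fences and with
 * FXSAVE/FXRSTOR once the corresponding CPU features have been detected.
 */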
func_memory_barrier mb = default_mb;
func_memory_barrier rmb = default_mb;
func_memory_barrier wmb = default_mb;
handle_fpu_state save_fpu_state = default_save_fpu_state;
handle_fpu_state restore_fpu_state = default_restore_fpu_state;
handle_fpu_state fpu_init = default_fpu_init;
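/* Fence instructions available with SSE (sfence) and SSE2 (lfence, mfence). */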
static void mfence(void) { asm volatile("mfence" ::: "memory"); }
static void lfence(void) { asm volatile("lfence" ::: "memory"); }
static void sfence(void) { asm volatile("sfence" ::: "memory"); }
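/*
 * FXSAVE/FXRSTOR save and restore the x87 and SSE state in one instruction;
 * FNCLEX clears pending x87 exceptions after the save.
 */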
static void save_fpu_state_fxsr(union fpu_state* state)
{
asm volatile ("fxsave %0; fnclex" : "=m"((*state).fxsave) :: "memory");
}
static void restore_fpu_state_fxsr(union fpu_state* state)
{
asm volatile ("fxrstor %0" :: "m"(state->fxsave));
}
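/*
 * Initialize the FXSAVE image with the reset defaults: control word 0x037f
 * and, if SSE is available, MXCSR 0x1f80 (all SSE exceptions masked).
 */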
static void fpu_init_fxsr(union fpu_state* fpu)
{
	i387_fxsave_t* fx = &fpu->fxsave;

	memset(fx, 0x00, sizeof(i387_fxsave_t));
	fx->cwd = 0x37f;
	if (BUILTIN_EXPECT(has_sse(), 1))
		fx->mxcsr = 0x1f80;
}
cpu_info_t cpu_info = { 0, 0 };
static uint32_t cpu_freq = 0;
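/*
 * Read the CPUID feature flags, enable FXSR/SSE support in CR4, install the
 * matching barrier and FPU handlers and, on the first call, print some
 * information about the detected features.
 */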
int cpu_detection(void)
{
	uint32_t a, b;
	size_t cr4;
	uint8_t first_time = 0;

	if (!cpu_info.feature1) {
		first_time = 1;
		cpuid(1, &a, &b, &cpu_info.feature2, &cpu_info.feature1);
	}

	cr4 = read_cr4();
	if (has_fxsr())
		cr4 |= 0x200; // set the OSFXSR bit
	if (has_sse())
		cr4 |= 0x400; // set the OSXMMEXCPT bit
	write_cr4(cr4);

	if (first_time && has_sse())
		wmb = sfence;

	if (first_time && has_sse2()) {
		rmb = lfence;
		mb = mfence;
	}

	if (first_time && has_avx())
		kprintf("The CPU supports the Advanced Vector Extensions (AVX), but MetalSVM doesn't support AVX!\n");

	if (has_fpu()) {
		if (first_time)
			kputs("Found and initialized FPU!\n");
		asm volatile ("fninit");
	}

	if (first_time && has_fxsr()) {
		save_fpu_state = save_fpu_state_fxsr;
		restore_fpu_state = restore_fpu_state_fxsr;
		fpu_init = fpu_init_fxsr;
	}

	if (first_time && on_hypervisor()) {
		uint32_t c, d;
		char vendor_id[13];

		kprintf("MetalSVM is running on a hypervisor!\n");

		cpuid(0x40000000, &a, &b, &c, &d);
		memcpy(vendor_id, &b, 4);
		memcpy(vendor_id+4, &c, 4);
		memcpy(vendor_id+8, &d, 4);
		vendor_id[12] = '\0';

		kprintf("Hypervisor Vendor Id: %s\n", vendor_id);
		kprintf("Maximum input value for hypervisor CPUID info: 0x%x\n", a);
	}

	return 0;
}
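/*
 * Determine the CPU frequency in MHz. On the SCC (RockCreek) the frequency
 * is taken from the reference clock constant; otherwise count TSC cycles
 * over one full timer second (TIMER_FREQ ticks) and divide by 10^6.
 */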
uint32_t detect_cpu_frequency(void)
{
#ifdef CONFIG_ROCKCREEK
	if (cpu_freq > 0)
		return cpu_freq;

	cpu_freq = RC_REFCLOCKMHZ;
	return cpu_freq;
#else
	uint64_t start, end, diff;
	uint64_t ticks, old;

	if (BUILTIN_EXPECT(cpu_freq > 0, 0))
		return cpu_freq;

	old = get_clock_tick();

	/* wait for the next time slice */
	while((ticks = get_clock_tick()) - old == 0)
		HALT;
	rmb();
	start = rdtsc();

	/* wait a second to determine the frequency */
	while(get_clock_tick() - ticks < TIMER_FREQ)
		HALT;
	rmb();
	end = rdtsc();

	diff = end > start ? end - start : start - end;
	cpu_freq = (uint32_t) (diff / (uint64_t) 1000000);

	return cpu_freq;
#endif
}
uint32_t get_cpu_frequency(void)
{
	if (cpu_freq > 0)
		return cpu_freq;

	return detect_cpu_frequency();
}
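/*
 * Busy-wait for usecs microseconds by polling the TSC; e.g. udelay(1000)
 * waits roughly one millisecond. While more than ~50000 cycles remain,
 * pending work queues are processed instead of spinning idly.
 */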
void udelay(uint32_t usecs)
{
	uint64_t diff, end, start = rdtsc();
	uint64_t deadline = get_cpu_frequency() * usecs;

	do {
		mb();
		end = rdtsc();
		diff = end > start ? end - start : start - end;
		if ((diff < deadline) && (deadline - diff > 50000))
			check_workqueues();
	} while(diff < deadline);
}