/*
 * metalsvm/arch/x86/include/asm/processor.h
 * (snapshot metadata: 2010-11-24 22:00:03 +0000, 127 lines, 3 KiB, C)
 */

/*
* Copyright 2010 Stefan Lankes, Chair for Operating Systems,
* RWTH Aachen University
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* This file is part of MetalSVM.
*/
#ifndef __ARCH_PROCESSOR_H__
#define __ARCH_PROCESSOR_H__
#include <metalsvm/stddef.h>
#include <asm/gdt.h>
#include <asm/apic.h>
#ifdef CONFIG_PCI
#include <asm/pci.h>
#endif
#ifdef CONFIG_ROCKCREEK
#include <asm/scc.h>
#endif
#ifdef __cplusplus
extern "C" {
#endif
/**
 * Read the CPU's 64-bit time stamp counter (RDTSC).
 *
 * @return Current TSC value (EDX:EAX combined).
 *
 * Fix: the previous "=A" constraint names the edx:eax register pair
 * only on 32-bit x86; on x86-64 it binds a single 64-bit register and
 * silently drops the high half of the counter.  Explicit "=a"/"=d"
 * outputs are correct on both.
 */
inline static uint64_t rdtsc(void)
{
    uint32_t lo, hi;
    asm volatile ("rdtsc" : "=a"(lo), "=d"(hi));
    return ((uint64_t)hi << 32) | lo;
}
/*
 * Write back all modified cache lines and invalidate the internal
 * caches (WBINVD).  Privileged instruction — ring 0 only.  The
 * "memory" clobber stops the compiler from keeping values in
 * registers or reordering memory accesses across the flush.
 */
inline static void flush_cache(void)
{
    asm volatile ("wbinvd" ::: "memory");
}
/*
 * Invalidate (without write-back!) the internal CPU caches (INVD).
 * Dirty lines are discarded — use flush_cache() when the data must be
 * written back first.  Privileged instruction — ring 0 only.
 *
 * Fix: add the "memory" clobber (consistent with flush_cache) so the
 * compiler does not cache memory values in registers or reorder loads
 * and stores across the invalidation.
 */
inline static void invalid_cache(void) {
    asm volatile ("invd" : : : "memory");
}
/**
 * Return the value currently held in the EAX register — i.e. the
 * return value of the function that was just called (cdecl).
 *
 * Fix: the old body copied EAX via "movl" without telling the compiler
 * that EAX was live, so the compiler was free to clobber it before the
 * asm ran.  Binding the output directly with the "a" constraint makes
 * the register dependency explicit and emits no extra instruction.
 *
 * NOTE(review): the value is only meaningful immediately after a call;
 * any compiler-generated code in between may overwrite EAX — confirm
 * call sites invoke this first thing after the call of interest.
 */
inline static int get_return_value(void) {
    int ret;
    asm volatile ("" : "=a"(ret));
    return ret;
}
/* Force strict CPU ordering: mb = full barrier, rmb = read barrier,
 * wmb = write barrier.  All variants also act as compiler barriers
 * via the "memory" clobber. */
#ifdef CONFIG_ROCKCREEK
/* NOTE(review): on the SCC a locked read-modify-write of the stack top
 * is used as the barrier instead of the *FENCE instructions —
 * presumably because the SCC cores do not implement them; confirm
 * against the SCC documentation. */
inline static void mb(void) { asm volatile ("lock; addl $0,0(%%esp)" ::: "memory"); }
inline static void rmb(void) { asm volatile ("lock; addl $0,0(%%esp)" ::: "memory"); }
inline static void wmb(void) { asm volatile ("lock; addl $0,0(%%esp)" ::: "memory"); }
#else
/* Standard x86 fence instructions. */
inline static void mb(void) { asm volatile("mfence" ::: "memory"); }
inline static void rmb(void) { asm volatile("lfence" ::: "memory"); }
inline static void wmb(void) { asm volatile("sfence" ::: "memory"); }
#endif
/**
 * Execute the CPUID instruction for the given leaf.
 *
 * @param code CPUID leaf, loaded into EAX.
 * @param a,b,c,d Receive EAX, EBX, ECX and EDX after the instruction.
 *
 * NOTE(review): ECX is not set before the instruction, so leaves with
 * sub-leaves would query an undefined sub-leaf index — confirm callers
 * only use leaves that ignore ECX on input.
 */
inline static void cpuid(uint32_t code, uint32_t* a, uint32_t* b, uint32_t* c, uint32_t* d) {
asm volatile ("cpuid" : "=a"(*a), "=b"(*b), "=c"(*c), "=d"(*d) : "0"(code));
}
/*
 * Drain the instruction pipeline.  CPUID is a serializing instruction,
 * so executing it (leaf 0) forces all preceding instructions to
 * complete; the register results are discarded.
 */
inline static void flush_pipeline(void) {
    uint32_t eax_out = 0;
    uint32_t edx_out = 0;
    uint32_t leaf = 0;
    asm volatile ("cpuid" : "=a"(eax_out), "=d"(edx_out) : "0"(leaf) : "%ebx", "%ecx");
}
/**
 * Read a 64-bit model-specific register (RDMSR).
 *
 * @param msr MSR index, loaded into ECX.
 * @return Register contents, EDX:EAX combined.  Ring 0 only.
 */
inline static uint64_t rdmsr(uint32_t msr) {
    uint32_t lo, hi;
    asm volatile ("rdmsr" : "=a" (lo), "=d" (hi) : "c" (msr));
    uint64_t value = (uint64_t)hi << 32;
    return value | lo;
}
/*
 * invalidate (not flush!) lines in L1 that map to MPB lines
 *
 * Emits the SCC-specific CL1FLUSHMB instruction as raw opcode bytes
 * (0x0f 0x0a) because the assembler does not know the mnemonic.
 * Compiles to nothing unless CONFIG_ROCKCREEK is set.
 */
inline static void cache_invalidate(void) {
#ifdef CONFIG_ROCKCREEK
asm volatile (".byte 0x0f; .byte 0x0a;\n" ); // CL1FLUSHMB
#endif
}
/* Timing/spacing filler: emit 1, 2, 4 or 8 NOP instructions. */
#define NOP1 asm volatile ("nop")
#define NOP2 asm volatile ("nop;nop")
#define NOP4 asm volatile ("nop;nop;nop;nop")
#define NOP8 asm volatile ("nop;nop;nop;nop;nop;nop;nop;nop")
/**
 * One-time platform initialization: SCC support (if configured), the
 * GDT, the APIC, and the PCI subsystem (if configured) — in that
 * order.
 *
 * @return Always 0.
 *
 * NOTE(review): the called init routines come from asm/gdt.h,
 * asm/apic.h, asm/pci.h and asm/scc.h; their ordering looks
 * intentional (GDT before APIC) — confirm before reordering.
 */
inline static int system_init(void)
{
#ifdef CONFIG_ROCKCREEK
scc_init();
#endif
gdt_install();
apic_init();
#ifdef CONFIG_PCI
pci_init();
#endif
return 0;
}
/**
 * Run the APIC timer calibration (see asm/apic.h).  Separate from
 * system_init(), presumably because calibration needs interrupts or a
 * reference timer already set up — confirm against the caller.
 *
 * @return Always 0.
 */
inline static int system_calibration(void)
{
apic_calibration();
return 0;
}
#ifdef __cplusplus
}
#endif
#endif