mirror of https://github.com/hermitcore/libhermit.git
move the functions that serialize load and store operations into the headers
HermitCore supports only 64-bit processors => all processors support sfence & co => no decision at runtime required
parent bb1b451f13
commit 11c32c5676
2 changed files with 10 additions and 26 deletions
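For context, a minimal sketch of how these barriers are typically used (illustrative only, not part of the commit: the producer/consumer functions and the payload/ready variables are hypothetical; mb(), rmb() and wmb() are the inline barriers introduced in the header below):

/* Illustrative sketch: wmb() keeps the data store ahead of the flag store,
 * rmb() keeps the flag load ahead of the data load. */
static volatile int payload = 0;
static volatile int ready = 0;

static void producer(void)
{
    payload = 42;  /* write the data first */
    wmb();         /* serialize stores: the data must be visible before the flag */
    ready = 1;     /* publish the flag */
}

static int consumer(void)
{
    while (!ready)
        ;          /* spin until the flag is visible */
    rmb();         /* serialize loads: do not read payload before the flag */
    return payload;
}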
@@ -625,15 +625,19 @@ inline static void invalid_cache(void) {
asm volatile ("invd");
}

/* Force strict CPU ordering */
typedef void (*func_memory_barrier)(void);

#if 0
// the old way to serialize the store and load operations
static inline void mb(void) { asm volatile ("lock; addl $0,0(%%esp)" ::: "memory", "cc"); }
static inline void rmb(void) { asm volatile ("lock; addl $0,0(%%esp)" ::: "memory", "cc"); }
static inline void wmb(void) { asm volatile ("lock; addl $0,0(%%esp)" ::: "memory", "cc"); }
#else
/// Force strict CPU ordering, serializes load and store operations.
extern func_memory_barrier mb;
static inline void mb(void) { asm volatile("mfence":::"memory"); }
/// Force strict CPU ordering, serializes load operations.
extern func_memory_barrier rmb;
static inline void rmb(void) { asm volatile("lfence":::"memory"); }
/// Force strict CPU ordering, serializes store operations.
extern func_memory_barrier wmb;
static inline void wmb(void) { asm volatile("sfence" ::: "memory"); }
#endif

/** @brief Get Extended Control Register
 *

@@ -49,11 +49,6 @@ extern void isrsyscall(void);
cpu_info_t cpu_info = { 0, 0, 0, 0, 0};
extern uint32_t cpu_freq;

static void default_mb(void)
{
asm volatile ("lock; addl $0,0(%%esp)" ::: "memory", "cc");
}

static void default_save_fpu_state(union fpu_state* state)
{
asm volatile ("fnsave %0; fwait" : "=m"((*state).fsave) :: "memory");

@@ -75,10 +70,6 @@ static void default_fpu_init(union fpu_state* fpu)
fp->fos = 0xffff0000u;
}

func_memory_barrier mb = default_mb;
func_memory_barrier rmb = default_mb;
func_memory_barrier wmb = default_mb;

static void default_writefs(size_t fs)
{
wrmsr(MSR_FS_BASE, fs);

@@ -132,9 +123,6 @@ func_read_fsgs readgs = default_readgs;
func_write_fsgs writefs = default_writefs;
func_write_fsgs writegs = default_writegs;

static void mfence(void) { asm volatile("mfence" ::: "memory"); }
static void lfence(void) { asm volatile("lfence" ::: "memory"); }
static void sfence(void) { asm volatile("sfence" ::: "memory"); }
handle_fpu_state save_fpu_state = default_save_fpu_state;
handle_fpu_state restore_fpu_state = default_restore_fpu_state;
handle_fpu_state fpu_init = default_fpu_init;

@@ -471,14 +459,6 @@ int cpu_detection(void) {
set_per_core(__core_id, atomic_int32_read(&current_boot_id));
kprintf("Core id is set to %d\n", CORE_ID);

if (first_time && has_sse())
wmb = sfence;

if (first_time && has_sse2()) {
rmb = lfence;
mb = mfence;
}

if (has_fpu()) {
if (first_time)
kputs("Found and initialized FPU!\n");
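In short, the commit trades the boot-time function-pointer dispatch removed above for compile-time inlines. A condensed before/after, paraphrased from the hunks (not additional code from the commit):

/* Before: barriers are function pointers, patched once in cpu_detection()
 * after SSE/SSE2 support has been confirmed. */
typedef void (*func_memory_barrier)(void);
func_memory_barrier wmb = default_mb;   /* lock; addl $0,0(%esp) fallback */
/* ... at boot ... */
if (first_time && has_sse())
    wmb = sfence;

/* After: x86_64 mandates SSE/SSE2, so SFENCE/LFENCE/MFENCE are always
 * available and the barrier can be a plain inline in the header. */
static inline void wmb(void) { asm volatile("sfence" ::: "memory"); }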