From 11c32c5676b9d664f50f8e3cfc7ae1911df9de7b Mon Sep 17 00:00:00 2001 From: Stefan Lankes Date: Fri, 1 Jul 2016 19:45:27 +0200 Subject: [PATCH] move functions to serialize load and store operations to the headers HermitCore supports only 64bit processors => all processors support sfence & co => no decision at runtime required --- hermit/arch/x86/include/asm/processor.h | 16 ++++++++++------ hermit/arch/x86/kernel/processor.c | 20 -------------------- 2 files changed, 10 insertions(+), 26 deletions(-) diff --git a/hermit/arch/x86/include/asm/processor.h b/hermit/arch/x86/include/asm/processor.h index eae79ee1c..e188f7470 100644 --- a/hermit/arch/x86/include/asm/processor.h +++ b/hermit/arch/x86/include/asm/processor.h @@ -625,15 +625,19 @@ inline static void invalid_cache(void) { asm volatile ("invd"); } -/* Force strict CPU ordering */ -typedef void (*func_memory_barrier)(void); - +#if 0 +// the old way to serialize the store and load operations +static inline void mb(void) { asm volatile ("lock; addl $0,0(%%esp)" ::: "memory", "cc"); } +static inline void rmb(void) { asm volatile ("lock; addl $0,0(%%esp)" ::: "memory", "cc"); } +static inline void wmb(void) { asm volatile ("lock; addl $0,0(%%esp)" ::: "memory", "cc"); } +#else /// Force strict CPU ordering, serializes load and store operations. -extern func_memory_barrier mb; +static inline void mb(void) { asm volatile("mfence":::"memory"); } /// Force strict CPU ordering, serializes load operations. -extern func_memory_barrier rmb; +static inline void rmb(void) { asm volatile("lfence":::"memory"); } /// Force strict CPU ordering, serializes store operations. 
-extern func_memory_barrier wmb; +static inline void wmb(void) { asm volatile("sfence" ::: "memory"); } +#endif /** @brief Get Extended Control Register * diff --git a/hermit/arch/x86/kernel/processor.c b/hermit/arch/x86/kernel/processor.c index 0d903c680..9f493ca3f 100644 --- a/hermit/arch/x86/kernel/processor.c +++ b/hermit/arch/x86/kernel/processor.c @@ -49,11 +49,6 @@ extern void isrsyscall(void); cpu_info_t cpu_info = { 0, 0, 0, 0, 0}; extern uint32_t cpu_freq; -static void default_mb(void) -{ - asm volatile ("lock; addl $0,0(%%esp)" ::: "memory", "cc"); -} - static void default_save_fpu_state(union fpu_state* state) { asm volatile ("fnsave %0; fwait" : "=m"((*state).fsave) :: "memory"); @@ -75,10 +70,6 @@ static void default_fpu_init(union fpu_state* fpu) fp->fos = 0xffff0000u; } -func_memory_barrier mb = default_mb; -func_memory_barrier rmb = default_mb; -func_memory_barrier wmb = default_mb; - static void default_writefs(size_t fs) { wrmsr(MSR_FS_BASE, fs); @@ -132,9 +123,6 @@ func_read_fsgs readgs = default_readgs; func_write_fsgs writefs = default_writefs; func_write_fsgs writegs = default_writegs; -static void mfence(void) { asm volatile("mfence" ::: "memory"); } -static void lfence(void) { asm volatile("lfence" ::: "memory"); } -static void sfence(void) { asm volatile("sfence" ::: "memory"); } handle_fpu_state save_fpu_state = default_save_fpu_state; handle_fpu_state restore_fpu_state = default_restore_fpu_state; handle_fpu_state fpu_init = default_fpu_init; @@ -471,14 +459,6 @@ int cpu_detection(void) { set_per_core(__core_id, atomic_int32_read(¤t_boot_id)); kprintf("Core id is set to %d\n", CORE_ID); - if (first_time && has_sse()) - wmb = sfence; - - if (first_time && has_sse2()) { - rmb = lfence; - mb = mfence; - } - if (has_fpu()) { if (first_time) kputs("Found and initialized FPU!\n");