Mirror of https://github.com/hermitcore/libhermit.git
Merge branch 'devel' into ibv_no_lib

Conflicts:
	arch/x86/kernel/entry.asm
	lwip
	tools/CMakeLists.txt
	tools/uhyve.c
Commit 3ae9af5ca4
33 changed files with 808 additions and 341 deletions
@@ -13,7 +13,7 @@
},
"version": {
"name": "0.2.3",
"name": "0.2.6",
"desc": "HermitCore's kernel as libOS",
"gpgSign": false
},
@@ -36,6 +36,9 @@ foreach(MODULE ${KERNEL_MODULES})
target_compile_definitions(${MODULE}
PRIVATE -D__KERNEL__)

target_compile_definitions(${MODULE}
PRIVATE -DMAX_ARGC_ENVC=${MAX_ARGC_ENVC})

target_compile_options(${MODULE}
PRIVATE ${HERMIT_KERNEL_FLAGS})

@@ -199,7 +202,7 @@ set(CPACK_SYSTEM_NAME all)
set(CPACK_PACKAGE_VERSION_MAJOR 0)
set(CPACK_PACKAGE_VERSION_MINOR 2)
set(CPACK_PACKAGE_VERSION_PATCH 3)
set(CPACK_PACKAGE_VERSION_PATCH 6)

set(CPACK_PACKAGE_CONTACT "Stefan Lankes <slankes@eonerc.rwth-aachen.de>")
@@ -51,26 +51,26 @@ extern "C" {
#define GDT_FLAG_TSS_BUSY 0x02

#define GDT_FLAG_SEGMENT 0x10
/// Privilege level: Ring 0
/// Privilege level: Ring 0
#define GDT_FLAG_RING0 0x00
/// Privilege level: Ring 1
#define GDT_FLAG_RING1 0x20
/// Privilege level: Ring 2
/// Privilege level: Ring 2
#define GDT_FLAG_RING2 0x40
/// Privilege level: Ring 3
/// Privilege level: Ring 3
#define GDT_FLAG_RING3 0x60
/// Segment is present
#define GDT_FLAG_PRESENT 0x80
/// Segment was accessed
#define GDT_FLAG_ACCESSED 0x01
/**
 * @brief Granularity of segment limit
/**
 * @brief Granularity of segment limit
 * - set: segment limit unit is 4 KB (page size)
 * - not set: unit is bytes
 */
#define GDT_FLAG_4K_GRAN 0x80
/**
 * @brief Default operand size
 * @brief Default operand size
 * - set: 32 bit
 * - not set: 16 bit
 */

@@ -78,7 +78,7 @@ extern "C" {
#define GDT_FLAG_32_BIT 0x40
#define GDT_FLAG_64_BIT 0x20

/** @brief Defines a GDT entry
/** @brief Defines a GDT entry
 *
 * A global descriptor table entry consists of:
 * - 32 bit base address (chunkwise embedded into this structure)

@@ -110,21 +110,14 @@ typedef struct {
size_t base;
} __attribute__ ((packed)) gdt_ptr_t;

// a TSS descriptor is twice larger than a code/data descriptor
#define GDT_ENTRIES (7+MAX_CORES*2)

#if GDT_ENTRIES > 8192
#error Too many GDT entries!
#endif

/** @brief Installs the global descriptor table
 *
 * The installation involves the following steps:
 * - set up the special GDT pointer
 * - set up the special GDT pointer
 * - set up the entries in our GDT
 * - finally call gdt_flush() in our assembler file
 * - finally call gdt_flush() in our assembler file
 *   in order to tell the processor where the new GDT is
 * - update the new segment registers
 * - update the new segment registers
 */
void gdt_install(void);

@@ -143,6 +136,10 @@ void gdt_set_gate(int num, unsigned long base, unsigned long limit,
void configure_gdt_entry(gdt_entry_t *dest_entry, unsigned long base, unsigned long limit,
unsigned char access, unsigned char gran);

/** @brief Initialize the task state segments
 */
void tss_init(tid_t id);

#ifdef __cplusplus
}
#endif
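The flag macros above are OR-ed together to form the access byte of a descriptor. A minimal sketch of how a ring-0 code-segment access byte is composed (GDT_FLAG_CODESEG and its value are an assumption for the example, not taken from this diff):

    /* sketch: composing a GDT access byte from the flags above */
    #include <stdint.h>

    #define GDT_FLAG_RING0    0x00
    #define GDT_FLAG_SEGMENT  0x10
    #define GDT_FLAG_PRESENT  0x80
    #define GDT_FLAG_CODESEG  0x0a   /* assumed value for the example */

    static inline uint8_t kernel_code_access(void)
    {
        /* present | code/data descriptor | code | ring 0 => 0x9a */
        return GDT_FLAG_PRESENT | GDT_FLAG_SEGMENT | GDT_FLAG_CODESEG | GDT_FLAG_RING0;
    }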
@@ -117,9 +117,11 @@ extern "C" {
#define CPU_FEATURE_AVX512VL (1 <<31)

// feature list 0x00000006
#define CPU_FEATURE_IDA (1 << 0)
#define CPU_FEATURE_IDA (1 << 1)
#define CPU_FEATURE_ARAT (1 << 2)
#define CPU_FEATURE_EPB (1 << 3)
#define CPU_FEATURE_HWP (1 << 10)
#define CPU_FEATURE_HWP (1 << 7)
#define CPU_FEATURE_HWP_EPP (1 << 10)

/*
 * EFLAGS bits

@@ -256,6 +258,7 @@ extern "C" {
#define MSR_HWP_REQUEST 0x00000774
#define MSR_HWP_STATUS 0x00000777

#define MSR_IA32_MISC_ENABLE_FAST_STRING (1ULL << 0)
#define MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP (1ULL << 16)
#define MSR_IA32_MISC_ENABLE_SPEEDSTEP_LOCK (1ULL << 20)
#define MSR_IA32_MISC_ENABLE_TURBO_DISABLE (1ULL << 38)

@@ -298,6 +301,9 @@ typedef struct {

extern cpu_info_t cpu_info;

// reset FS & GS registers to the default values
int reset_fsgs(int32_t core_id);

// determine the cpu features
int cpu_detection(void);
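The corrected leaf-0x06 bit positions are plain masks against the CPUID output; a small hedged sketch (which output register carries each bit is deliberately left out here):

    /* sketch: testing the corrected leaf-0x06 feature masks */
    #define CPU_FEATURE_HWP      (1 << 7)
    #define CPU_FEATURE_HWP_EPP  (1 << 10)

    static int has_hwp_epp(unsigned int leaf6_bits)
    {
        return (leaf6_bits & CPU_FEATURE_HWP) && (leaf6_bits & CPU_FEATURE_HWP_EPP);
    }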
@@ -77,13 +77,6 @@ static inline int jump_to_user_code(size_t ep, size_t stack)
return 0;
}

/** @brief Architecture dependent initialize routine
 */
static inline void arch_init_task(task_t* task)
{
set_tss((size_t) task->stack + KERNEL_STACK_SIZE - 0x10, (size_t) task->ist_addr + KERNEL_STACK_SIZE - 0x10);
}

#ifdef __cplusplus
}
#endif
@@ -50,8 +50,6 @@
 */
extern const void kernel_start;

#define IOAPIC_ADDR ((size_t) &kernel_start - 2*PAGE_SIZE)
#define LAPIC_ADDR ((size_t) &kernel_start - 1*PAGE_SIZE)
#define MAX_APIC_CORES MAX_CORES
#define SMP_SETUP_ADDR 0x8000ULL

@@ -364,7 +362,7 @@ int apic_enable_timer(void)
}

static apic_mp_t* search_mptable(size_t base, size_t limit) {
size_t ptr=PAGE_FLOOR(base), vptr=0;
size_t ptr=PAGE_FLOOR(base), old_ptr = 0;
size_t flags = PG_GLOBAL | PG_RW | PG_PCD;
apic_mp_t* tmp;
uint32_t i;

@@ -373,15 +371,17 @@ static apic_mp_t* search_mptable(size_t base, size_t limit) {
if (has_nx())
flags |= PG_XD;

size_t vptr = vma_alloc(PAGE_SIZE, VMA_READ|VMA_WRITE);

while(ptr<=limit-sizeof(apic_mp_t)) {
if (vptr) {
if (old_ptr) {
// unmap page via mapping a zero page
page_unmap(vptr, 1);
vptr = 0;
page_unmap(old_ptr, 1);
old_ptr = 0;
}

if (BUILTIN_EXPECT(!page_map(ptr & PAGE_MASK, ptr & PAGE_MASK, 1, flags), 1)) {
vptr = ptr & PAGE_MASK;
if (BUILTIN_EXPECT(!page_map(vptr, ptr & PAGE_MASK, 1, flags), 1)) {
old_ptr = vptr;
} else {
kprintf("Failed to map 0x%zx, which is required to search for the MP tables\n", ptr);
return NULL;

@@ -390,10 +390,8 @@ static apic_mp_t* search_mptable(size_t base, size_t limit) {
for(i=0; (vptr) && (i<PAGE_SIZE); i+=4) {
tmp = (apic_mp_t*) (vptr+i);
if (tmp->signature == MP_FLT_SIGNATURE) {
if (!((tmp->version > 4) || (tmp->features[0]))) {
vma_add(ptr & PAGE_MASK, (ptr & PAGE_MASK) + PAGE_SIZE, VMA_READ|VMA_WRITE);
if (!((tmp->version > 4) || (tmp->features[0])))
return tmp;
}
}
}
}

@@ -402,7 +400,9 @@ static apic_mp_t* search_mptable(size_t base, size_t limit) {

if (vptr) {
// unmap page via mapping a zero page
page_unmap(vptr, 1);
if (old_ptr)
page_unmap(old_ptr, 1);
vma_free(vptr, vptr + PAGE_SIZE);
}

return NULL;
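search_mptable() now reserves one page of virtual address space with vma_alloc() and reuses it for every candidate physical page instead of identity-mapping each page. A condensed sketch of that map–scan–unmap pattern (look_for_signature() is a hypothetical helper; the page flags and kernel helpers are the ones visible in the diff):

    /* sketch of the reusable-mapping scan used by search_mptable() */
    static void* scan_physical_range(size_t base, size_t limit)
    {
        size_t vslot = vma_alloc(PAGE_SIZE, VMA_READ|VMA_WRITE);   /* one reusable virtual page */
        void* hit = NULL;

        for (size_t phys = base & PAGE_MASK; !hit && phys < limit; phys += PAGE_SIZE) {
            if (page_map(vslot, phys, 1, PG_GLOBAL|PG_RW|PG_PCD))
                break;                                  /* mapping failed, give up */
            hit = look_for_signature((void*)vslot);     /* hypothetical scan helper */
            page_unmap(vslot, 1);                       /* recycle the slot for the next page */
        }

        vma_free(vslot, vslot + PAGE_SIZE);
        return hit;
    }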
@@ -479,7 +479,7 @@ static int wakeup_ap(uint32_t start_eip, uint32_t id)

if (!reset_vector) {
reset_vector = (char*) vma_alloc(PAGE_SIZE, VMA_READ|VMA_WRITE);
page_map((size_t)reset_vector, 0x00, 1, PG_RW|PG_GLOBAL|PG_PCD);
page_map((size_t)reset_vector, 0x00, 1, PG_RW|PG_GLOBAL|PG_PCD|PG_NX);
reset_vector += 0x467; // add base address of the reset vector
LOG_DEBUG("Map reset vector to %p\n", reset_vector);
}

@@ -581,8 +581,11 @@ int smp_init(void)
 * in real mode, switch to protected and finally they jump to smp_main.
 */
page_map(SMP_SETUP_ADDR, SMP_SETUP_ADDR, PAGE_CEIL(sizeof(boot_code)) >> PAGE_BITS, PG_RW|PG_GLOBAL);
vma_add(SMP_SETUP_ADDR, SMP_SETUP_ADDR + PAGE_CEIL(sizeof(boot_code)), VMA_READ|VMA_WRITE|VMA_CACHEABLE);
vma_add(SMP_SETUP_ADDR, SMP_SETUP_ADDR + PAGE_CEIL(sizeof(boot_code)),
VMA_EXECUTE|VMA_READ|VMA_WRITE|VMA_CACHEABLE);
memcpy((void*)SMP_SETUP_ADDR, boot_code, sizeof(boot_code));
LOG_DEBUG("Map trampoline code at 0x%zx (size 0x%zx)\n",
SMP_SETUP_ADDR, sizeof(boot_code));

for(i=0; i<sizeof(boot_code); i++)
{

@@ -592,8 +595,6 @@ int smp_init(void)
}
}

LOG_DEBUG("size of the boot_code %d\n", sizeof(boot_code));

for(i=1; (i<ncores) && (i<MAX_CORES); i++)
{
atomic_int32_set(&current_boot_id, i);

@@ -619,7 +620,7 @@ int smp_init(void)

// How many ticks are used to calibrate the APIC timer
#define APIC_TIMER_CALIBRATION_TICKS (3)
#define APIC_TIMER_CALIBRATION_TICKS (1)

/*
 * detects the timer frequency of the APIC and restarts

@@ -634,8 +635,8 @@ int apic_calibration(void)
return -ENXIO;

const uint64_t cpu_freq_hz = (uint64_t) get_cpu_frequency() * 1000000ULL;
const uint64_t cycles_per_tick = cpu_freq_hz / (uint64_t) TIMER_FREQ;
const uint64_t wait_cycles = cycles_per_tick * APIC_TIMER_CALIBRATION_TICKS;
const uint64_t cycles_per_ms = cpu_freq_hz / 1000ULL;
const uint64_t wait_cycles = cycles_per_ms * APIC_TIMER_CALIBRATION_TICKS;

// disable interrupts to increase calibration accuracy
flags = irq_nested_disable();

@@ -656,7 +657,7 @@ int apic_calibration(void)
} while(diff < wait_cycles);

// Calculate timer increments for desired tick frequency
icr = (initial_counter - lapic_read(APIC_CCR)) / APIC_TIMER_CALIBRATION_TICKS;
icr = ((initial_counter - lapic_read(APIC_CCR)) * 1000ULL) / (TIMER_FREQ * APIC_TIMER_CALIBRATION_TICKS);
irq_nested_enable(flags);

lapic_reset();
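With APIC_TIMER_CALIBRATION_TICKS now counted in milliseconds, the measured APIC count is rescaled from the calibration window to one timer period (1000/TIMER_FREQ ms). A worked example with illustrative numbers (a 100 Hz TIMER_FREQ is assumed here, not taken from the diff):

    /* worked example of the rescaling: 1 ms of APIC ticks -> one timer period */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        const uint64_t TIMER_FREQ = 100;        /* assumed tick rate in Hz */
        const uint64_t calibration_ms = 1;      /* APIC_TIMER_CALIBRATION_TICKS */
        const uint64_t apic_ticks_seen = 12500; /* APIC decrements counted during 1 ms */

        uint64_t icr = (apic_ticks_seen * 1000ULL) / (TIMER_FREQ * calibration_ms);
        printf("initial count register = %llu\n", (unsigned long long)icr);  /* 125000 */
        return 0;
    }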
@@ -723,7 +724,7 @@ static int apic_probe(void)

found_mp:
if (!apic_mp) {
LOG_INFO("Didn't find MP config table\n");
LOG_INFO("Didn't find MP table\n");
goto no_mp;
}

@@ -732,7 +733,7 @@ found_mp:
isle = 0;
}

LOG_INFO("Found MP config table at 0x%x\n", apic_mp->mp_config);
LOG_INFO("Found MP config at 0x%x\n", apic_mp->mp_config);
LOG_INFO("System uses Multiprocessing Specification 1.%u\n", apic_mp->version);
LOG_INFO("MP features 1: %u\n", apic_mp->features[0]);

@@ -748,8 +749,12 @@ found_mp:

apic_config = (apic_config_table_t*) ((size_t) apic_mp->mp_config);
if (((size_t) apic_config & PAGE_MASK) != ((size_t) apic_mp & PAGE_MASK)) {
page_map((size_t) apic_config & PAGE_MASK, (size_t) apic_config & PAGE_MASK, 1, flags);
vma_add( (size_t) apic_config & PAGE_MASK, ((size_t) apic_config & PAGE_MASK) + PAGE_SIZE, VMA_READ|VMA_WRITE);
size_t vconfig = vma_alloc(PAGE_SIZE, VMA_READ|VMA_WRITE);

if (BUILTIN_EXPECT(vconfig && !page_map(vconfig & PAGE_MASK, (size_t) apic_config & PAGE_MASK, 1, flags), 1)) {
apic_config = (apic_config_table_t*) (vconfig | ((size_t) apic_config & ~PAGE_MASK));
LOG_INFO("Map MP config at %p\n", apic_config);
} else apic_config = 0;
}

if (!apic_config || strncmp((void*) &apic_config->signature, "PCMP", 4) !=0) {

@@ -760,11 +765,11 @@ found_mp:
addr = (size_t) apic_config;
addr += sizeof(apic_config_table_t);

// does the apic table raise the page boundary? => map additional page
// TODO: does the apic table raise the page boundary? => map additional page
if (apic_config->entry_count * 20 + addr > ((size_t) apic_config & PAGE_MASK) + PAGE_SIZE)
{
page_map(((size_t) apic_config & PAGE_MASK) + PAGE_SIZE, ((size_t) apic_config & PAGE_MASK) + PAGE_SIZE, 1, flags);
vma_add( ((size_t) apic_config & PAGE_MASK) + PAGE_SIZE, ((size_t) apic_config & PAGE_MASK) + 2*PAGE_SIZE, VMA_READ|VMA_WRITE);
LOG_ERROR("APIC table raise limit\n");
while(1) { HALT; }
}

// search the ISA bus => required to redirect the IRQs

@@ -812,12 +817,16 @@ found_mp:
ioapic = (ioapic_t*) ((size_t) io_entry->addr);
LOG_INFO("Found IOAPIC at 0x%x\n", ioapic);
if (is_single_kernel() && ioapic) {
page_map(IOAPIC_ADDR, (size_t)ioapic & PAGE_MASK, 1, flags);
vma_add(IOAPIC_ADDR, IOAPIC_ADDR + PAGE_SIZE, VMA_READ|VMA_WRITE);
ioapic = (ioapic_t*) IOAPIC_ADDR;
LOG_INFO("Map IOAPIC to 0x%x\n", ioapic);
LOG_INFO("IOAPIC version: 0x%x\n", ioapic_version());
LOG_INFO("Max Redirection Entry: %u\n", ioapic_max_redirection_entry());
size_t vaddr = vma_alloc(PAGE_SIZE, VMA_READ|VMA_WRITE);
if (BUILTIN_EXPECT(vaddr && !page_map(vaddr, (size_t)ioapic & PAGE_MASK, 1, flags), 1)) {
ioapic = (ioapic_t*) (vaddr | ((size_t) ioapic & ~PAGE_MASK));
LOG_INFO("Map IOAPIC to 0x%x\n", ioapic);
LOG_INFO("IOAPIC version: 0x%x\n", ioapic_version());
LOG_INFO("Max Redirection Entry: %u\n", ioapic_max_redirection_entry());
} else {
LOG_ERROR("Unable to map IOAPIC\n");
ioapic = 0;
}
}
addr += 8;
} else if (*((uint8_t*) addr) == 3) { // IO_INT

@@ -853,13 +862,13 @@ check_lapic:
LOG_INFO("Found and enable X2APIC\n");
x2apic_enable();
} else {
if (page_map(LAPIC_ADDR, (size_t)lapic & PAGE_MASK, 1, flags)) {
LOG_ERROR("Failed to map APIC to 0x%x\n", LAPIC_ADDR);
goto out;
size_t vaddr = vma_alloc(PAGE_SIZE, VMA_READ | VMA_WRITE);
if (BUILTIN_EXPECT(vaddr && !page_map(vaddr, lapic & PAGE_MASK, 1, flags), 1)) {
LOG_INFO("Mapped APIC 0x%x to 0x%x\n", lapic, vaddr);
lapic = vaddr | (lapic & ~PAGE_MASK);
} else {
LOG_INFO("Mapped APIC 0x%x to 0x%x\n", lapic, LAPIC_ADDR);
vma_add(LAPIC_ADDR, LAPIC_ADDR + PAGE_SIZE, VMA_READ | VMA_WRITE);
lapic = LAPIC_ADDR;
LOG_ERROR("Failed to map APIC to 0x%x\n", vaddr);
goto out;
}
}
@@ -900,15 +909,17 @@ no_mp:

extern int smp_main(void);
extern void gdt_flush(void);
extern int set_idle_task(void);
extern tid_t set_idle_task(void);

#if MAX_CORES > 1
int smp_start(void)
{
LOG_DEBUG("Try to initialize processor (local id %d)\n", atomic_int32_read(&current_boot_id));
int32_t core_id = atomic_int32_read(&current_boot_id);

LOG_DEBUG("Try to initialize processor (local id %d)\n", core_id);

// use the same gdt like the boot processors
gdt_flush();
//gdt_flush();

// install IDT
idt_install();

@@ -921,9 +932,14 @@ int smp_start(void)
// reset APIC
lapic_reset();

LOG_DEBUG("Processor %d (local id %d) is entering its idle task\n", apic_cpu_id(), atomic_int32_read(&current_boot_id));
LOG_DEBUG("CR0 of core %u: 0x%x\n", atomic_int32_read(&current_boot_id), read_cr0());
online[atomic_int32_read(&current_boot_id)] = 1;
LOG_DEBUG("Processor %d (local id %d) is entering its idle task\n", apic_cpu_id(), core_id);
LOG_DEBUG("CR0 of core %u: 0x%x\n", core_id, read_cr0());
online[core_id] = 1;

tid_t id = set_idle_task();

// initialize task state segment
tss_init(id);

// set task switched flag for the first FPU access
// => initialize the FPU

@@ -931,8 +947,6 @@ int smp_start(void)
cr0 |= CR0_TS;
write_cr0(cr0);

set_idle_task();

/*
 * TSS is set, pagining is enabled
 * => now, we are able to register our task
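In short, an application processor now obtains its idle task id first and hands it to tss_init(), which allocates the per-core TSS and kernel stacks and reloads the GDT. A condensed sketch of that order (not the literal smp_start() body):

    /* sketch: per-core bring-up order after this change */
    static void ap_bringup(void)
    {
        idt_install();                /* interrupts first */
        lapic_reset();

        tid_t id = set_idle_task();   /* now returns the idle task's id */
        tss_init(id);                 /* allocates TSS + stacks, reloads GDT, switches stack */
    }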
@@ -75,7 +75,7 @@ align 4
global hcip
global hcgateway
global hcmask
global kernel_start_host
global host_logical_addr
base dq 0
limit dq 0
cpu_freq dd 0

@@ -106,7 +106,8 @@ align 4
hcip db 10,0,5,2
hcgateway db 10,0,5,1
hcmask db 255,255,255,0
kernel_start_host dq 0
host_logical_addr dq 0

; Bootstrap page tables are used during the initialization.
align 4096
boot_pml4:

@@ -229,15 +230,7 @@ Lno_pml4_init:
%endif

; set default stack pointer
mov rsp, boot_stack
add rsp, KERNEL_STACK_SIZE-16
xor rax, rax
mov eax, [boot_processor]
cmp eax, -1
je L1
imul eax, KERNEL_STACK_SIZE
add rsp, rax
L1:
mov rsp, stack_top-0x10
mov rbp, rsp

; jump to the boot processors's C code

@@ -248,14 +241,8 @@ L1:
%if MAX_CORES > 1
ALIGN 64
Lsmp_main:
xor rax, rax
mov eax, DWORD [current_boot_id]

; set default stack pointer
imul rax, KERNEL_STACK_SIZE
add rax, boot_stack
add rax, KERNEL_STACK_SIZE-16
mov rsp, rax
mov rsp, stack_top-0x10
mov rbp, rsp

extern smp_start

@@ -726,15 +713,33 @@ sighandler_epilog:

jmp [rsp - 5 * 8] ; jump to rip from saved state


global replace_boot_stack
replace_boot_stack:
; rdi = 1st argument = desination address

; set rsp to the new stack
sub rsp, stack_bottom
add rsp, rdi

; currently we omit the frame point => no recalculation
;sub rbp, stack_bottom
;add rbp, rdi

; copy boot stack to the new one
cld
mov rcx, KERNEL_STACK_SIZE
mov rsi, stack_bottom
rep movsb

ret

SECTION .data

align 4096
global boot_stack
boot_stack:
TIMES (MAX_CORES*KERNEL_STACK_SIZE) DB 0xcd
global boot_ist
boot_ist:
stack_bottom:
TIMES KERNEL_STACK_SIZE DB 0xcd
stack_top:

; add some hints to the ELF file
SECTION .note.GNU-stack noalloc noexec nowrite progbits
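replace_boot_stack keeps the current stack depth while moving execution onto a freshly allocated stack. A C model of the pointer arithmetic (illustrative only; the real routine is the assembly above):

    /* C model of replace_boot_stack: same depth, new backing memory */
    #include <stdint.h>
    #include <string.h>

    static void replace_boot_stack_model(uint8_t* stack_bottom, size_t stack_size,
                                         uint8_t* dest, uint8_t** rsp)
    {
        size_t depth = (size_t)(*rsp - stack_bottom);  /* current offset into the boot stack */
        memcpy(dest, stack_bottom, stack_size);        /* copy the live boot stack */
        *rsp = dest + depth;                           /* continue at the same depth */
    }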
@@ -31,19 +31,23 @@
#include <hermit/tasks.h>
#include <hermit/errno.h>
#include <hermit/processor.h>
#include <hermit/logging.h>
#include <asm/gdt.h>
#include <asm/tss.h>
#include <asm/page.h>

#define MAX_IST 3
// minimal number of GDT entries (for one core)
// a TSS descriptor is twice larger than a code/data descriptor
#define GDT_MIN_ENTRIES (7+1*2)

gdt_ptr_t gp;
// currently, our kernel has full access to the ioports
static gdt_entry_t gdt[GDT_ENTRIES] = {[0 ... GDT_ENTRIES-1] = {0, 0, 0, 0, 0, 0}};
static tss_t task_state_segments[MAX_CORES] __attribute__ ((aligned (PAGE_SIZE)));
static uint8_t stack_table[MAX_CORES][KERNEL_STACK_SIZE*MAX_IST] __attribute__ ((aligned (PAGE_SIZE)));

extern const void boot_stack;
// currently, our kernel has full access to the ioports
static gdt_entry_t boot_gdt[GDT_MIN_ENTRIES] = {[0 ... GDT_MIN_ENTRIES-1] = {0, 0, 0, 0, 0, 0}};
static gdt_entry_t* gdt = boot_gdt;

static tss_t* boot_tss = NULL;
static tss_t** task_state_segments = &boot_tss;

/*
 * This is defined in entry.asm. We use this to properly reload

@@ -51,19 +55,27 @@ extern const void boot_stack;
 */
extern void gdt_flush(void);

extern const void boot_stack;
/*
 * This is defined in entry.asm. We use this to properly replace
 * the current stack
 */
extern void replace_boot_stack(size_t);

extern int32_t boot_processor;
extern atomic_int32_t possible_cpus;
extern atomic_int32_t current_boot_id;

void set_tss(size_t rps0, size_t ist1)
{
task_state_segments[CORE_ID].rsp0 = rps0;
task_state_segments[CORE_ID].ist1 = ist1;
task_state_segments[CORE_ID]->rsp0 = rps0;
task_state_segments[CORE_ID]->ist1 = ist1;
}

/* Setup a descriptor in the Global Descriptor Table */
void gdt_set_gate(int num, unsigned long base, unsigned long limit,
unsigned char access, unsigned char gran)
{
configure_gdt_entry(&gdt[num], base, limit, access, gran);
configure_gdt_entry(gdt+num, base, limit, access, gran);
}

void configure_gdt_entry(gdt_entry_t *dest_entry, unsigned long base, unsigned long limit,

@@ -92,13 +104,11 @@ void configure_gdt_entry(gdt_entry_t *dest_entry, unsigned long base, unsigned l
 */
void gdt_install(void)
{
int i, num = 0;

memset(task_state_segments, 0x00, MAX_CORES*sizeof(tss_t));
int num = 0;

/* Setup the GDT pointer and limit */
gp.limit = (sizeof(gdt_entry_t) * GDT_ENTRIES) - 1;
gp.base = (size_t) &gdt;
gp.limit = (sizeof(gdt_entry_t) * GDT_MIN_ENTRIES) - 1;
gp.base = (size_t) gdt;

/* Our NULL descriptor */
gdt_set_gate(num++, 0, 0, 0, 0);

@@ -143,20 +153,103 @@ void gdt_install(void)
gdt_set_gate(num++, 0, 0,
GDT_FLAG_RING3 | GDT_FLAG_SEGMENT | GDT_FLAG_DATASEG | GDT_FLAG_PRESENT, GDT_FLAG_64_BIT);

/*
 * Create TSS for each core (we use these segments for task switching)
 */
for(i=0; i<MAX_CORES; i++) {
task_state_segments[i].rsp0 = (size_t)&boot_stack + (i+1) * KERNEL_STACK_SIZE - 0x10;
task_state_segments[i].ist1 = 0; // ist will created per task
task_state_segments[i].ist2 = (size_t) stack_table[i] + (2 /*IST number */ - 1) * KERNEL_STACK_SIZE - 0x10;
task_state_segments[i].ist3 = (size_t) stack_table[i] + (3 /*IST number */ - 1) * KERNEL_STACK_SIZE - 0x10;
task_state_segments[i].ist4 = (size_t) stack_table[i] + (4 /*IST number */ - 1) * KERNEL_STACK_SIZE - 0x10;

gdt_set_gate(num+i*2, (unsigned long) (task_state_segments+i), sizeof(tss_t),
GDT_FLAG_PRESENT | GDT_FLAG_TSS | GDT_FLAG_RING0, 0);
}

/* Flush out the old GDT and install the new changes! */
gdt_flush();
}

/*
 * Create TSS for the current core (we use these segments for task switching)
 */
void tss_init(tid_t id /* => current task id */)
{
int32_t no_cpus = atomic_int32_read(&possible_cpus);
int32_t core_id = atomic_int32_read(&current_boot_id);

LOG_INFO("Initialize TSS for task %d on core %d, possible cores %d\n",
id, core_id, no_cpus);

if ((task_state_segments == &boot_tss) && (no_cpus > 1))
{
task_state_segments = (tss_t**) kmalloc(sizeof(tss_t*)*no_cpus);
if (BUILTIN_EXPECT(!task_state_segments, 0)) {
LOG_ERROR("Unable to allocate array for the task state segments\n");
goto oom;
}

memset(task_state_segments, 0x00, sizeof(tss_t*)*no_cpus);
task_state_segments[0] = boot_tss;
}

if ((gdt == boot_gdt) && (no_cpus > 1))
{
gdt = (gdt_entry_t*) kmalloc(sizeof(gdt_entry_t)*(7+no_cpus*2));
if (BUILTIN_EXPECT(!gdt, 0)) {
LOG_ERROR("Unable to allocate GDT\n");
goto oom;
}

memset(gdt, 0x00, sizeof(gdt_entry_t)*(7+no_cpus*2));
memcpy(gdt, &boot_gdt, sizeof(gdt_entry_t)*GDT_MIN_ENTRIES);

gp.limit = (sizeof(gdt_entry_t) * (7+no_cpus*2)) - 1;
gp.base = (size_t) gdt;
}

tss_t* tss = (tss_t*) kmalloc(sizeof(tss_t));
if (BUILTIN_EXPECT(!tss, 0)) {
LOG_ERROR("Unable to allocate task state segment\n");
goto oom;
}

memset(tss, 0x00, sizeof(tss_t));

size_t rsp0 = (size_t) create_stack(KERNEL_STACK_SIZE);
if (BUILTIN_EXPECT(!rsp0, 0)) {
LOG_ERROR("Unable to allocate stack for the idle task %d\n", id);
goto oom;
}
tss->rsp0 = rsp0 + KERNEL_STACK_SIZE - 0x10;

size_t ist1 = (size_t) create_stack(KERNEL_STACK_SIZE);
if (BUILTIN_EXPECT(!ist1, 0)) {
LOG_ERROR("Unable to allocate ist1 for the idle task %d\n", id);
goto oom;
}
tss->ist1 = (size_t) ist1 + KERNEL_STACK_SIZE - 0x10;

tss->ist2 = (size_t) create_stack(KERNEL_STACK_SIZE) + KERNEL_STACK_SIZE - 0x10;
if (BUILTIN_EXPECT(!tss->ist2, 0)) {
LOG_ERROR("Unable to allocate ist2 for the idle task %d\n", id);
goto oom;
}

tss->ist3 = (size_t) create_stack(KERNEL_STACK_SIZE) + KERNEL_STACK_SIZE - 0x10;
if (BUILTIN_EXPECT(!tss->ist3, 0)) {
LOG_ERROR("Unable to allocate ist3 for the idle task %d\n", id);
goto oom;
}

tss->ist4 = (size_t) create_stack(KERNEL_STACK_SIZE) + KERNEL_STACK_SIZE - 0x10;
if (BUILTIN_EXPECT(!tss->ist4, 0)) {
LOG_ERROR("Unable to allocate ist4 for the idle task %d\n", id);
goto oom;
}

task_state_segments[core_id] = tss;
gdt_set_gate(7+core_id*2, (unsigned long) tss,
sizeof(tss_t), GDT_FLAG_PRESENT | GDT_FLAG_TSS | GDT_FLAG_RING0, 0);

// set stack in our task table
set_boot_stack(id, rsp0, ist1);

// replace the stack pointer
replace_boot_stack(rsp0);

gdt_flush();
reset_fsgs(core_id);

return;

oom:
while(1) { HALT; }
}
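The boot GDT now holds only GDT_MIN_ENTRIES descriptors and is re-allocated at run time with 7 + 2*ncores entries, since a 64-bit TSS descriptor occupies two 8-byte slots. A small arithmetic sketch:

    /* sketch: GDT size as a function of the core count (8 bytes per slot) */
    #include <stdio.h>

    int main(void)
    {
        for (int ncores = 1; ncores <= 4; ncores *= 2) {
            int entries = 7 + 2 * ncores;   /* fixed descriptors + one TSS (2 slots) per core */
            printf("%d core(s): %d entries, %d bytes\n", ncores, entries, entries * 8);
        }
        return 0;
    }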
@@ -361,13 +361,18 @@ static void check_est(uint8_t out)
LOG_INFO("P-State HWP enabled\n");
}

if (c & CPU_FEATURE_EPB) {
if (c & CPU_FEATURE_HWP_EPP) {
// for maximum performance we have to clear BIAS
wrmsr(MSR_IA32_ENERGY_PERF_BIAS, 0);
if (out)
LOG_INFO("Found Performance and Energy Bias Hint support: 0x%llx\n", rdmsr(MSR_IA32_ENERGY_PERF_BIAS));
}

if (c & CPU_FEATURE_ARAT)
LOG_INFO("Timer runs with a constant rate!");
else
LOG_INFO("Timer doesn't run with a constant rate!");

#if 0
if (out) {
LOG_INFO("CPU features 6: 0x%x, 0x%x, 0x%x, 0x%x\n", a, b, c, d);

@@ -391,6 +396,19 @@ static void check_est(uint8_t out)
return;
}

int reset_fsgs(int32_t core_id)
{
writefs(0);
#if MAX_CORES > 1
writegs(core_id * ((size_t) &percore_end0 - (size_t) &percore_start));
#else
writegs(0);
#endif
wrmsr(MSR_KERNEL_GS_BASE, 0);

return 0;
}

int cpu_detection(void) {
uint64_t xcr0;
uint32_t a=0, b=0, c=0, d=0, level = 0, extended = 0;

@@ -416,7 +434,7 @@ int cpu_detection(void) {
cpuid(0x80000000, &extended, &b, &c, &d);
if (extended >= 0x80000001)
cpuid(0x80000001, &a, &b, &c, &cpu_info.feature3);
if (extended >= 0x80000008) {
if (extended >= 0x80000004) {
uint32_t* bint = (uint32_t*) cpu_brand;

cpuid(0x80000002, bint+0, bint+1, bint+2, bint+3);

@@ -451,7 +469,7 @@ int cpu_detection(void) {
kprintf("Syscall instruction: %s\n", (cpu_info.feature3 & CPU_FEATURE_SYSCALL) ? "available" : "unavailable");
}

//TODO: add check for SMEP and SMAP
//TODO: add check for SMEP, PCE and SMAP

// be sure that AM, NE and MP is enabled
cr0 = read_cr0();

@@ -476,7 +494,9 @@ int cpu_detection(void) {
cr4 |= CR4_MCE; // enable machine check exceptions
//if (has_vmx())
//	cr4 |= CR4_VMXE;
cr4 &= ~CR4_TSD; // => every privilege level is able to use rdtsc
cr4 &= ~(CR4_PCE|CR4_TSD); // disable performance monitoring counter
// clear TSD => every privilege level is able
// to use rdtsc
write_cr4(cr4);

@@ -529,13 +549,7 @@ int cpu_detection(void) {
//if (has_vmx())
//	wrmsr(MSR_IA32_FEATURE_CONTROL, rdmsr(MSR_IA32_FEATURE_CONTROL) | 0x5);

writefs(0);
#if MAX_CORES > 1
writegs(atomic_int32_read(&current_boot_id) * ((size_t) &percore_end0 - (size_t) &percore_start));
#else
writegs(0);
#endif
wrmsr(MSR_KERNEL_GS_BASE, 0);
reset_fsgs(atomic_int32_read(&current_boot_id));

LOG_INFO("Core %d set per_core offset to 0x%x\n", atomic_int32_read(&current_boot_id), rdmsr(MSR_GS_BASE));

@@ -633,6 +647,7 @@ int cpu_detection(void) {
LOG_INFO("Maximum input value for hypervisor: 0x%x\n", a);
}

if (first_time) {
LOG_INFO("CR0 0x%llx, CR4 0x%llx\n", read_cr0(), read_cr4());
LOG_INFO("size of xsave_t: %d\n", sizeof(xsave_t));
@@ -38,6 +38,7 @@
#include <asm/multiboot.h>

#define GAP_BELOW 0x100000ULL
#define IB_POOL_SIZE 0x400000ULL

#define IB_MEMORY_SIZE (1UL << 20)
#define IB_MEMORY_NPAGES (IB_MEMORY_SIZE / PAGE_SIZE)

@@ -60,6 +61,9 @@ extern const void kernel_end;

uint8_t * host_kernel_start = NULL;

extern void* host_logical_addr;
uint64_t ib_pool_addr = 0;

static spinlock_t list_lock = SPINLOCK_INIT;

static free_list_t init_list = {0, 0, NULL, NULL};

@@ -367,6 +371,20 @@ int memory_init(void)
}
}

// Ok, we are now able to use our memory management => update tss
tss_init(0);

if (host_logical_addr) {
LOG_INFO("Host has its guest logical address at %p\n", host_logical_addr);
size_t phyaddr = get_pages(IB_POOL_SIZE >> PAGE_BITS);
LOG_INFO("Allocate %d MB at physical address 0x%zx for the IB pool\n", IB_POOL_SIZE >> 20, phyaddr);
if (BUILTIN_EXPECT(!page_map((size_t)host_logical_addr+phyaddr, phyaddr, IB_POOL_SIZE >> PAGE_BITS, PG_GLOBAL|PG_RW), 1)) {
vma_add((size_t)host_logical_addr+phyaddr, (size_t)host_logical_addr+phyaddr+IB_POOL_SIZE, VMA_READ|VMA_WRITE|VMA_CACHEABLE);
ib_pool_addr = (size_t)host_logical_addr+phyaddr;
LOG_INFO("Map IB pool at 0x%zx\n", ib_pool_addr);
}
}

return ret;

oom:
@@ -96,7 +96,7 @@ static uint8_t expect_zeroed_pages = 0;
size_t virt_to_phys(size_t addr)
{
if ((addr > (size_t) &kernel_start) &&
(addr <= PAGE_2M_FLOOR((size_t) &kernel_start + image_size)))
(addr <= PAGE_2M_CEIL((size_t) &kernel_start + image_size)))
{
size_t vpn = addr >> (PAGE_2M_BITS); // virtual page number
size_t entry = self[1][vpn]; // page table entry
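Rounding the kernel image end up instead of down keeps addresses in the last, partially used 2 MiB page on the fast translation path. A tiny example of the two roundings (the macros are written out here for illustration; the kernel has its own definitions):

    /* FLOOR vs CEIL on a 2 MiB boundary */
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_2M_SIZE     (2ULL * 1024 * 1024)
    #define PAGE_2M_FLOOR(a) ((a) & ~(PAGE_2M_SIZE - 1))
    #define PAGE_2M_CEIL(a)  (((a) + PAGE_2M_SIZE - 1) & ~(PAGE_2M_SIZE - 1))

    int main(void)
    {
        uint64_t image_end = 0x200000ULL + 0x12345ULL;   /* kernel ends inside a 2 MiB page */
        printf("floor: 0x%llx\n", (unsigned long long)PAGE_2M_FLOOR(image_end));  /* 0x200000 */
        printf("ceil:  0x%llx\n", (unsigned long long)PAGE_2M_CEIL(image_end));   /* 0x400000 */
        return 0;
    }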
@@ -354,6 +354,9 @@ void page_fault_handler(struct state *s)

spinlock_irqsave_unlock(&page_lock);

// clear cr2 to signalize that the pagefault is solved by the pagefault handler
write_cr2(0);

return;
}

@@ -372,6 +375,9 @@ default_handler:
if (task->heap)
LOG_ERROR("Heap 0x%llx - 0x%llx\n", task->heap->start, task->heap->end);

// clear cr2 to signalize that the pagefault is solved by the pagefault handler
write_cr2(0);

apic_eoi(s->int_no);
//do_abort();
sys_exit(-EFAULT);

@@ -395,7 +401,8 @@ int page_init(void)

while(((size_t) cmdline + i) <= ((size_t) cmdline + cmdsize))
{
page_map(((size_t) cmdline + i) & PAGE_MASK, ((size_t) cmdline + i) & PAGE_MASK, 1, PG_GLOBAL|PG_RW|PG_PRESENT);
page_map(((size_t) cmdline + i) & PAGE_MASK, ((size_t) cmdline + i) & PAGE_MASK,
1, PG_NX|PG_GLOBAL|PG_RW|PG_PRESENT);
i += PAGE_SIZE;
}
} else cmdline = 0;
@@ -35,18 +35,20 @@ int vma_arch_init(void)
int ret = 0;

if (mb_info) {
ret = vma_add((size_t)mb_info & PAGE_MASK, ((size_t)mb_info & PAGE_MASK) + PAGE_SIZE, VMA_READ|VMA_WRITE);
ret = vma_add((size_t)mb_info & PAGE_MASK, ((size_t)mb_info & PAGE_MASK) + PAGE_SIZE,
VMA_READ|VMA_WRITE|VMA_CACHEABLE);
if (BUILTIN_EXPECT(ret, 0))
goto out;

if ((mb_info->flags & MULTIBOOT_INFO_CMDLINE) && cmdline) {
LOG_INFO("vma_arch_init: map cmdline %p (size 0x%zd)", cmdline, cmdsize);
LOG_INFO("vma_arch_init: map cmdline %p (size 0x%zd)\n", cmdline, cmdsize);

size_t i = 0;
while(((size_t) cmdline + i) < ((size_t) cmdline + cmdsize))
{
if ((((size_t)cmdline + i) & PAGE_MASK) != ((size_t) mb_info & PAGE_MASK)) {
ret = vma_add(((size_t)cmdline + i) & PAGE_MASK, (((size_t)cmdline + i) & PAGE_MASK) + PAGE_SIZE, VMA_READ|VMA_WRITE);
ret = vma_add(((size_t)cmdline + i) & PAGE_MASK, (((size_t)cmdline + i) & PAGE_MASK) + PAGE_SIZE,
VMA_READ|VMA_WRITE|VMA_CACHEABLE);
if (BUILTIN_EXPECT(ret, 0))
goto out;
}
@@ -1,4 +1,4 @@
set(PACKAGE_VERSION "0.2.2" CACHE STRING
set(PACKAGE_VERSION "0.2.6" CACHE STRING
"HermitCore current version")

set(MAX_CORES "512" CACHE STRING

@@ -19,6 +19,10 @@ set(KERNEL_STACK_SIZE 8192 CACHE STRING
set(DEFAULT_STACK_SIZE 262144 CACHE STRING
"Task stack size in bytes")

set(MAX_ARGC_ENVC 128 CACHE STRING
"Maximum number of command line parameters and enviroment variables
forwarded to uhyve")

option(DYNAMIC_TICKS
"Don't use a periodic timer event to keep track of time" ON)

@@ -99,6 +99,7 @@ function(build_external NAME PATH DEPENDS)
-DLOCAL_PREFIX_BASE_DIR=${LOCAL_PREFIX_BASE_DIR}
-DCMAKE_INSTALL_MESSAGE=NEVER
-DCMAKE_EXPORT_COMPILE_COMMANDS=true
-DMAX_ARGC_ENVC=${MAX_ARGC_ENVC}
--no-warn-unused-cli
${DO_PROFILING}
${CMD_VARS}
@@ -63,7 +63,7 @@ then
fi

echo "-- Local CMake v${MAJOR}.${MINOR} installed to ${CMAKE_DIR_REL}"
echo "-- Next time you source this script, no download will be neccessary"
echo "-- Next time you source this script, no download will be necessary"
fi

export PATH="${CMAKE_DIR}/bin:${PATH}"
@@ -50,6 +50,9 @@
#define TX_BUF_LEN 4096
#define MIN(a, b) (a) < (b) ? (a) : (b)

static uint8_t rx_buffer[RX_BUF_LEN+16 /* header size */] __attribute__ ((aligned (PAGE_SIZE)));
static uint8_t tx_buffer[4][TX_BUF_LEN] __attribute__ ((aligned (PAGE_SIZE)));

/*
 * To set the RTL8139 to accept only the Transmit OK (TOK) and Receive OK (ROK)
 * interrupts, we would have the TOK and ROK bits of the IMR high and leave the

@@ -328,26 +331,15 @@ err_t rtl8139if_init(struct netif* netif)
rtl8139if->irq = pci_info.irq;

/* allocate the receive buffer */
rtl8139if->rx_buffer = page_alloc(RX_BUF_LEN + 16 /* header size */, VMA_READ|VMA_WRITE);
if (!(rtl8139if->rx_buffer)) {
LOG_ERROR("rtl8139if_init: out of memory\n");
kfree(rtl8139if);
return ERR_MEM;
}
memset(rtl8139if->rx_buffer, 0x00, RX_BUF_LEN + 16);
rtl8139if->rx_buffer = rx_buffer;
//memset(rtl8139if->rx_buffer, 0x00, RX_BUF_LEN + 16);

/* allocate the send buffers */
rtl8139if->tx_buffer[0] = page_alloc(4*TX_BUF_LEN, VMA_READ|VMA_WRITE);
if (!(rtl8139if->tx_buffer[0])) {
LOG_ERROR("rtl8139if_init: out of memory\n");
page_free(rtl8139if->rx_buffer, RX_BUF_LEN + 16);
kfree(rtl8139if);
return ERR_MEM;
}
memset(rtl8139if->tx_buffer[0], 0x00, 4*TX_BUF_LEN);
rtl8139if->tx_buffer[1] = rtl8139if->tx_buffer[0] + 1*TX_BUF_LEN;
rtl8139if->tx_buffer[2] = rtl8139if->tx_buffer[0] + 2*TX_BUF_LEN;
rtl8139if->tx_buffer[3] = rtl8139if->tx_buffer[0] + 3*TX_BUF_LEN;
rtl8139if->tx_buffer[0] = tx_buffer[0];
//memset(rtl8139if->tx_buffer[0], 0x00, 4*TX_BUF_LEN);
rtl8139if->tx_buffer[1] = tx_buffer[1];
rtl8139if->tx_buffer[2] = tx_buffer[2];
rtl8139if->tx_buffer[3] = tx_buffer[3];

netif->state = rtl8139if;
mynetif = netif;

@@ -355,8 +347,6 @@ err_t rtl8139if_init(struct netif* netif)
tmp32 = inportl(rtl8139if->iobase + TCR);
if (tmp32 == 0xFFFFFF) {
LOG_ERROR("rtl8139if_init: ERROR\n");
page_free(rtl8139if->rx_buffer, RX_BUF_LEN + 16);
page_free(rtl8139if->tx_buffer[0], 4*TX_BUF_LEN);
kfree(rtl8139if);
memset(netif, 0x00, sizeof(struct netif));
mynetif = NULL;

@@ -400,8 +390,6 @@ err_t rtl8139if_init(struct netif* netif)
if (!tmp16) {
// it seems not to work
LOG_ERROR("RTL8139 reset failed\n");
page_free(rtl8139if->rx_buffer, RX_BUF_LEN + 16);
page_free(rtl8139if->tx_buffer[0], 4*TX_BUF_LEN);
kfree(rtl8139if);
memset(netif, 0x00, sizeof(struct netif));
mynetif = NULL;
@@ -195,7 +195,7 @@ static void uhyve_netif_poll(void)
struct pbuf *p = NULL;
struct pbuf *q;

if (uhyve_net_read_sync(uhyve_netif->rx_buf, &len) == 0)
while (uhyve_net_read_sync(uhyve_netif->rx_buf, &len) == 0)
{
#if ETH_PAD_SIZE
len += ETH_PAD_SIZE; /*allow room for Ethernet padding */

@@ -251,7 +251,7 @@ err_t uhyve_netif_init (struct netif* netif)

memset(uhyve_netif, 0x00, sizeof(uhyve_netif_t));

uhyve_netif->rx_buf = page_alloc(RX_BUF_LEN + 16 /* header size */, VMA_READ|VMA_WRITE);
uhyve_netif->rx_buf = page_alloc(RX_BUF_LEN + 16 /* header size */, VMA_READ|VMA_WRITE|VMA_CACHEABLE);
if (!(uhyve_netif->rx_buf)) {
LOG_ERROR("uhyve_netif_init: out of memory\n");
kfree(uhyve_netif);

@@ -259,7 +259,7 @@ err_t uhyve_netif_init (struct netif* netif)
}
memset(uhyve_netif->rx_buf, 0x00, RX_BUF_LEN + 16);

uhyve_netif->tx_buf[0] = page_alloc(TX_BUF_NUM * TX_BUF_LEN, VMA_READ|VMA_WRITE);
uhyve_netif->tx_buf[0] = page_alloc(TX_BUF_NUM * TX_BUF_LEN, VMA_READ|VMA_WRITE|VMA_CACHEABLE);
if (!(uhyve_netif->tx_buf[0])) {
LOG_ERROR("uhyve_netif_init: out of memory\n");
page_free(uhyve_netif->rx_buf, RX_BUF_LEN + 16);
@@ -56,14 +56,15 @@ extern "C" {
 * - 0 on success
 * - -EINVAL on invalid argument
 */
inline static int sem_init(sem_t* s, unsigned int v) {
inline static int sem_init(sem_t* s, unsigned int v)
{
unsigned int i;

if (BUILTIN_EXPECT(!s, 0))
return -EINVAL;

s->value = v;
s->pos = 0;
s->rpos = s->wpos = 0;
for(i=0; i<MAX_TASKS; i++)
s->queue[i] = MAX_TASKS;
spinlock_irqsave_init(&s->lock);

@@ -76,7 +77,8 @@ inline static int sem_init(sem_t* s, unsigned int v) {
 * - 0 on success
 * - -EINVAL on invalid argument
 */
inline static int sem_destroy(sem_t* s) {
inline static int sem_destroy(sem_t* s)
{
if (BUILTIN_EXPECT(!s, 0))
return -EINVAL;

@@ -94,7 +96,8 @@ inline static int sem_destroy(sem_t* s) {
 * - -EINVAL on invalid argument
 * - -ECANCELED on failure (You still have to wait)
 */
inline static int sem_trywait(sem_t* s) {
inline static int sem_trywait(sem_t* s)
{
int ret = -ECANCELED;

if (BUILTIN_EXPECT(!s, 0))

@@ -114,12 +117,13 @@ inline static int sem_trywait(sem_t* s) {
 *
 * @param s Address of the according sem_t structure
 * @param ms Timeout in milliseconds
 * @return
 * @return
 * - 0 on success
 * - -EINVAL on invalid argument
 * - -ETIME on timer expired
 */
inline static int sem_wait(sem_t* s, uint32_t ms) {
inline static int sem_wait(sem_t* s, uint32_t ms)
{
task_t* curr_task = per_core(current_task);

if (BUILTIN_EXPECT(!s, 0))

@@ -132,8 +136,8 @@ next_try1:
s->value--;
spinlock_irqsave_unlock(&s->lock);
} else {
s->queue[s->pos] = curr_task->id;
s->pos = (s->pos + 1) % MAX_TASKS;
s->queue[s->wpos] = curr_task->id;
s->wpos = (s->wpos + 1) % MAX_TASKS;
block_current_task();
spinlock_irqsave_unlock(&s->lock);
reschedule();

@@ -157,8 +161,8 @@ next_try2:
spinlock_irqsave_unlock(&s->lock);
goto timeout;
}
s->queue[s->pos] = curr_task->id;
s->pos = (s->pos + 1) % MAX_TASKS;
s->queue[s->wpos] = curr_task->id;
s->wpos = (s->wpos + 1) % MAX_TASKS;
set_timer(deadline);
spinlock_irqsave_unlock(&s->lock);
reschedule();

@@ -181,28 +185,23 @@ timeout:
return 0;
}

/** @brief Give back resource
/** @brief Give back resource
 * @return
 * - 0 on success
 * - -EINVAL on invalid argument
 */
inline static int sem_post(sem_t* s) {
unsigned int k, i;

inline static int sem_post(sem_t* s)
{
if (BUILTIN_EXPECT(!s, 0))
return -EINVAL;

spinlock_irqsave_lock(&s->lock);

s->value++;
i = s->pos;
for(k=0; k<MAX_TASKS; k++) {
if (s->queue[i] < MAX_TASKS) {
wakeup_task(s->queue[i]);
s->queue[i] = MAX_TASKS;
break;
}
i = (i + 1) % MAX_TASKS;
if (s->queue[s->rpos] < MAX_TASKS) {
wakeup_task(s->queue[s->rpos]);
s->queue[s->rpos] = MAX_TASKS;
s->rpos = (s->rpos + 1) % MAX_TASKS;
}

spinlock_irqsave_unlock(&s->lock);

@@ -46,8 +46,10 @@ typedef struct sem {
unsigned int value;
/// Queue of waiting tasks
tid_t queue[MAX_TASKS];
/// Position in queue
unsigned int pos;
/// Position in queue to add a task
unsigned int wpos;
/// Position in queue to get a task
unsigned int rpos;
/// Access lock
spinlock_irqsave_t lock;
} sem_t;
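Splitting the single queue cursor into wpos (where sem_wait() enqueues itself) and rpos (where sem_post() wakes the oldest waiter) turns the wait list into a true FIFO ring and removes the linear scan from sem_post(). A minimal model with plain integers (no locking; MAX_TASKS doubles as the empty-slot marker, as in sem.h):

    /* minimal model of the wpos/rpos wait ring */
    #include <stdio.h>

    #define MAX_TASKS 8
    static unsigned queue[MAX_TASKS];
    static unsigned wpos = 0, rpos = 0;

    static void enqueue_waiter(unsigned tid)   /* what sem_wait() does */
    {
        queue[wpos] = tid;
        wpos = (wpos + 1) % MAX_TASKS;
    }

    static void wake_one(void)                 /* what sem_post() does */
    {
        if (queue[rpos] < MAX_TASKS) {
            printf("wake task %u\n", queue[rpos]);
            queue[rpos] = MAX_TASKS;           /* mark the slot free again */
            rpos = (rpos + 1) % MAX_TASKS;
        }
    }

    int main(void)
    {
        for (unsigned i = 0; i < MAX_TASKS; i++) queue[i] = MAX_TASKS;
        enqueue_waiter(3); enqueue_waiter(5);
        wake_one();   /* wakes task 3 first: FIFO order */
        wake_one();   /* then task 5 */
        return 0;
    }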
@@ -186,7 +186,9 @@ inline static int spinlock_irqsave_lock(spinlock_irqsave_t* s) {

ticket = atomic_int64_inc(&s->queue);
while (atomic_int64_read(&s->dequeue) != ticket) {
irq_nested_enable(flags);
PAUSE;
irq_nested_disable();
}

s->coreid = CORE_ID;

@@ -280,6 +280,10 @@ static inline void check_workqueues(void)
 */
int is_proxy(void);

/** @brief initialized the stacks of the idle tasks
 */
int set_boot_stack(tid_t id, size_t stack, size_t ist_addr);

#ifdef __cplusplus
}
#endif
@@ -41,6 +41,7 @@
#include <asm/page.h>
#include <asm/uart.h>
#include <asm/multiboot.h>
#include <asm/uhyve.h>

#include <lwip/init.h>
#include <lwip/sys.h>

@@ -65,6 +66,22 @@
#define HERMIT_PORT 0x494E
#define HERMIT_MAGIC 0x7E317

/* Ports and data structures for command line args + envp forwarding to uhyve */
#define UHYVE_PORT_CMDSIZE 0x509
#define UHYVE_PORT_CMDVAL 0x510

typedef struct {
int argc;
int argsz[MAX_ARGC_ENVC];
int envc;
int envsz[MAX_ARGC_ENVC];
} __attribute__ ((packed)) uhyve_cmdsize_t;

typedef struct {
char **argv;
char **envp;
} __attribute__ ((packed)) uhyve_cmdval_t;

static struct netif default_netif;
static const int sobufsize = 131072;

@@ -295,8 +312,9 @@ int smp_main(void)
print_status();

/* wait for the other cpus */
while(atomic_int32_read(&cpu_online) < atomic_int32_read(&possible_cpus))
while(atomic_int32_read(&cpu_online) < atomic_int32_read(&possible_cpus)) {
PAUSE;
}

while(1) {
check_workqueues();

@@ -371,6 +389,53 @@ static int initd(void* arg)
// initialize network
err = init_netifs();

if (is_uhyve()) {
int i;
uhyve_cmdsize_t uhyve_cmdsize;
uhyve_cmdval_t uhyve_cmdval;
uhyve_cmdval_t uhyve_cmdval_phys;

uhyve_send(UHYVE_PORT_CMDSIZE,
(unsigned)virt_to_phys((size_t)&uhyve_cmdsize));

uhyve_cmdval.argv = kmalloc(uhyve_cmdsize.argc * sizeof(char *));
for(i=0; i<uhyve_cmdsize.argc; i++)
uhyve_cmdval.argv[i] = kmalloc(uhyve_cmdsize.argsz[i] * sizeof(char));
uhyve_cmdval.envp = kmalloc(uhyve_cmdsize.envc * sizeof(char *));
for(i=0; i<uhyve_cmdsize.envc; i++)
uhyve_cmdval.envp[i] = kmalloc(uhyve_cmdsize.envsz[i] * sizeof(char));

// create a similar structure with guest physical addresses
char** argv_virt = uhyve_cmdval_phys.argv = kmalloc(uhyve_cmdsize.argc * sizeof(char *));
for(i=0; i<uhyve_cmdsize.argc; i++)
uhyve_cmdval_phys.argv[i] = (char*) virt_to_phys((size_t) uhyve_cmdval.argv[i]);
uhyve_cmdval_phys.argv = (char**) virt_to_phys((size_t) uhyve_cmdval_phys.argv);

char** envp_virt = uhyve_cmdval_phys.envp = kmalloc(uhyve_cmdsize.envc * sizeof(char *));
for(i=0; i<uhyve_cmdsize.envc-1; i++)
uhyve_cmdval_phys.envp[i] = (char*) virt_to_phys((size_t) uhyve_cmdval.envp[i]);
// the last element is always NULL
uhyve_cmdval_phys.envp[uhyve_cmdsize.envc-1] = NULL;
uhyve_cmdval_phys.envp = (char**) virt_to_phys((size_t) uhyve_cmdval_phys.envp);

uhyve_send(UHYVE_PORT_CMDVAL,
(unsigned)virt_to_phys((size_t)&uhyve_cmdval_phys));

LOG_INFO("Boot time: %d ms\n", (get_clock_tick() * 1000) / TIMER_FREQ);
libc_start(uhyve_cmdsize.argc, uhyve_cmdval.argv, uhyve_cmdval.envp);

for(i=0; i<argc; i++)
kfree(uhyve_cmdval.argv[i]);
kfree(uhyve_cmdval.argv);
for(i=0; i<envc; i++)
kfree(uhyve_cmdval.envp[i]);
kfree(uhyve_cmdval.envp);
kfree(argv_virt);
kfree(envp_virt);

return 0;
}

if ((err != 0) || !is_proxy())
{
char* dummy[] = {"app_name", NULL};
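The argument/environment forwarding is a two-step port-I/O handshake: the guest first passes the physical address of a uhyve_cmdsize_t so the host can report argc/envc and the string lengths, then passes a uhyve_cmdval_t whose pointers have been converted to guest-physical addresses so the host can copy the strings in. A compressed guest-side sketch (alloc_string_array() is a hypothetical helper; the real code converts every element with virt_to_phys() as shown above):

    /* sketch of the guest side of the CMDSIZE/CMDVAL handshake */
    static void fetch_uhyve_cmdline(void)
    {
        uhyve_cmdsize_t sizes;
        uhyve_cmdval_t  vals;

        /* step 1: host fills in argc/envc and the size of every string */
        uhyve_send(UHYVE_PORT_CMDSIZE, (unsigned)virt_to_phys((size_t)&sizes));

        /* step 2: allocate exactly-sized buffers and hand their physical
         * addresses back so the host can copy argv[]/envp[] into them */
        vals.argv = alloc_string_array(sizes.argc, sizes.argsz);  /* hypothetical helper */
        vals.envp = alloc_string_array(sizes.envc, sizes.envsz);  /* hypothetical helper */
        uhyve_send(UHYVE_PORT_CMDVAL, (unsigned)virt_to_phys((size_t)&vals));
    }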
@@ -72,10 +72,6 @@ DEFINE_PER_CORE(task_t*, current_task, task_table+0);
DEFINE_PER_CORE(uint32_t, __core_id, 0);
#endif

extern const void boot_stack;
extern const void boot_ist;

static void update_timer(task_t* first)
{
if(first) {

@@ -269,38 +265,46 @@ int multitasking_init(void)
}

task_table[0].prio = IDLE_PRIO;
task_table[0].stack = (char*) ((size_t)&boot_stack + core_id * KERNEL_STACK_SIZE);
task_table[0].ist_addr = (char*)&boot_ist;
task_table[0].stack = NULL; // will be initialized later
task_table[0].ist_addr = NULL; // will be initialized later
set_per_core(current_task, task_table+0);
arch_init_task(task_table+0);

readyqueues[core_id].idle = task_table+0;

return 0;
}

int set_idle_task(void)
int set_boot_stack(tid_t id, size_t stack, size_t ist_addr)
{
uint32_t i, core_id = CORE_ID;
int ret = -ENOMEM;
if (id < MAX_CORES) {
task_table[id].stack = (void*) stack;
task_table[id].ist_addr = (void*) ist_addr;

return 0;
}

return -EINVAL;
}

tid_t set_idle_task(void)
{
uint32_t core_id = CORE_ID;
tid_t id = ~0;

spinlock_irqsave_lock(&table_lock);

for(i=0; i<MAX_TASKS; i++) {
for(uint32_t i=0; i<MAX_TASKS; i++) {
if (task_table[i].status == TASK_INVALID) {
task_table[i].id = i;
task_table[i].id = id = i;
task_table[i].status = TASK_IDLE;
task_table[i].last_core = core_id;
task_table[i].last_stack_pointer = NULL;
task_table[i].stack = (char*) ((size_t)&boot_stack + core_id * KERNEL_STACK_SIZE);
task_table[i].ist_addr = create_stack(KERNEL_STACK_SIZE);
task_table[i].stack = NULL;
task_table[i].ist_addr = NULL;
task_table[i].prio = IDLE_PRIO;
task_table[i].heap = NULL;
readyqueues[core_id].idle = task_table+i;
set_per_core(current_task, readyqueues[core_id].idle);
arch_init_task(task_table+i);
ret = 0;

break;
}

@@ -308,7 +312,7 @@ int set_idle_task(void)

spinlock_irqsave_unlock(&table_lock);

return ret;
return id;
}

void finish_task_switch(void)
lwip (2 changes)
@@ -1 +1 @@
Subproject commit c21d911aa4f178563dc102f595d1d97dd8471458
Subproject commit d9c0ff8d247d5398bf96f00e61a74e9701fdbd0f

mm/vma.c (31 changes)
@@ -91,6 +91,8 @@ size_t vma_alloc(size_t size, uint32_t flags)
size_t base = VMA_MIN;
size_t limit = VMA_MAX;

size = PAGE_CEIL(size);

spinlock_irqsave_lock(lock);

// first fit search for free memory area

@@ -237,7 +239,7 @@ int vma_add(size_t start, size_t end, uint32_t flags)

if (pred && (pred->end == start) && (pred->flags == flags)) {
pred->end = end; // resize VMA
LOG_DEBUG("vma_alloc: resize vma, start 0x%zx, pred->start 0x%zx, pred->end 0x%zx\n", start, pred->start, pred->end);
LOG_DEBUG("vma_add: resize vma, start 0x%zx, pred->start 0x%zx, pred->end 0x%zx\n", start, pred->start, pred->end);
} else {
// insert new VMA
vma_t* new = kmalloc(sizeof(vma_t));

@@ -251,9 +253,11 @@ int vma_add(size_t start, size_t end, uint32_t flags)
new->flags = flags;
new->next = succ;
new->prev = pred;
LOG_DEBUG("vma_add: create new vma, new->start 0x%zx, new->end 0x%zx\n", new->start, new->end);

if (succ)
succ->prev = new;

if (pred)
pred->next = new;
else

@@ -266,21 +270,22 @@ fail:
return ret;
}

static void print_vma(vma_t *vma)
{
while (vma) {
LOG_INFO("0x%lx - 0x%lx: size=0x%x, flags=%c%c%c%s\n", vma->start, vma->end, vma->end - vma->start,
(vma->flags & VMA_READ) ? 'r' : '-',
(vma->flags & VMA_WRITE) ? 'w' : '-',
(vma->flags & VMA_EXECUTE) ? 'x' : '-',
(vma->flags & VMA_CACHEABLE) ? "" : " (uncached)");
vma = vma->next;
}
}

void vma_dump(void)
{
void print_vma(vma_t *vma) {
while (vma) {
LOG_INFO("0x%lx - 0x%lx: size=0x%x, flags=%c%c%c%s\n", vma->start, vma->end, vma->end - vma->start,
(vma->flags & VMA_READ) ? 'r' : '-',
(vma->flags & VMA_WRITE) ? 'w' : '-',
(vma->flags & VMA_EXECUTE) ? 'x' : '-',
(vma->flags & VMA_CACHEABLE) ? "" : " (uncached)");
vma = vma->next;
}
}

LOG_INFO("VMAs:\n");
spinlock_irqsave_lock(&hermit_mm_lock);
print_vma(&vma_boot);
print_vma(vma_list);
spinlock_irqsave_unlock(&hermit_mm_lock);
}
tests.sh (14 changes)
@@ -4,7 +4,7 @@
# it is written only for internal tests via Travis CI

TDIR=build/local_prefix/opt/hermit/x86_64-hermit/extra
FILES="$TDIR/tests/hello $TDIR/tests/hellof $TDIR/tests/hello++ $TDIR/tests/thr_hello $TDIR/tests/pi $TDIR/benchmarks/stream $TDIR/benchmarks/basic $TDIR/tests/signals $TDIR/tests/test-malloc $TDIR/tests/test-malloc-mt"
FILES="$TDIR/tests/hello $TDIR/tests/hellof $TDIR/tests/hello++ $TDIR/tests/thr_hello $TDIR/tests/pi $TDIR/benchmarks/stream $TDIR/benchmarks/basic $TDIR/tests/signals $TDIR/tests/test-malloc $TDIR/tests/test-malloc-mt $TDIR/tests/argv_envp"
PROXY=build/local_prefix/opt/hermit/bin/proxy

for f in $FILES; do echo "check $f..."; HERMIT_ISLE=qemu HERMIT_CPUS=1 HERMIT_KVM=0 HERMIT_VERBOSE=1 timeout --kill-after=5m 5m $PROXY $f || exit 1; done

@@ -21,3 +21,15 @@ sleep 1

# kill server
kill $!

# test connection via netio
wget http://web.ars.de/wp-content/uploads/2017/04/netio132.zip
unzip netio132.zip
HERMIT_ISLE=qemu HERMIT_CPUS=2 HERMIT_KVM=0 HERMIT_VERBOSE=1 HERMIT_APP_PORT=18767 $PROXY $TDIR/benchmarks/netio &
sleep 1
chmod a+rx bin/linux-x86_64
bin/linux-x86_64 -t -b 4k localhost
sleep 1

# kill server
kill $!
@@ -5,8 +5,9 @@ include(../cmake/HermitCore-Paths.cmake)

add_compile_options(-std=c99)

add_executable(proxy proxy.c uhyve.c uhyve-ibv.c uhyve-net.c)
add_executable(proxy proxy.c utils.c uhyve.c uhyve-ibv.c uhyve-net.c)
target_compile_options(proxy PUBLIC -pthread)
target_compile_options(proxy PUBLIC -DMAX_ARGC_ENVC=${MAX_ARGC_ENVC})
target_link_libraries(proxy pthread dl ibverbs)

install(TARGETS proxy
@@ -129,33 +129,13 @@ static void exit_handler(int sig)

static char* get_append_string(void)
{
char line[2048];
char* match;
char* point;
uint32_t freq = get_cpufreq();
if (freq == 0)
return "-freq0 -proxy";

FILE* fp = fopen("/proc/cpuinfo", "r");
if (!fp)
return "-freq0";
snprintf(cmdline, MAX_PATH, "\"-freq%u -proxy\"", freq);

while(fgets(line, 2048, fp)) {
if ((match = strstr(line, "cpu MHz")) == NULL)
continue;

// scan strinf for the next number
for(; (*match < 0x30) || (*match > 0x39); match++)
;

for(point = match; ((*point != '.') && (*point != '\0')); point++)
;
*point = '\0';

snprintf(cmdline, MAX_PATH, "\"-freq%s -proxy\"", match);
fclose(fp);

return cmdline;
}

return "-freq0";
return cmdline;
}

static int env_init(char *path)

@@ -316,7 +296,12 @@ static int qemu_init(char *path)
char port_str[MAX_PATH];
pid_t qemu_pid;
char* qemu_str = "qemu-system-x86_64";
char* qemu_argv[] = {qemu_str, "-daemonize", "-display", "none", "-smp", "1", "-m", "2G", "-pidfile", pidname, "-net", "nic,model=rtl8139", "-net", hostfwd, "-chardev", chardev_file, "-device", "pci-serial,chardev=gnc0", "-kernel", loader_path, "-initrd", path, "-append", get_append_string(), NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL};
char* qemu_argv[] = {qemu_str, "-daemonize", "-display", "none", "-smp", "1",
"-m", "2G", "-pidfile", pidname, "-net", "nic,model=rtl8139", "-net",
hostfwd, "-chardev", chardev_file, "-device", "pci-serial,chardev=gnc0",
"-kernel", loader_path, "-initrd", path, "-append", get_append_string(),
"-no-acpi", NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL};

str = getenv("HERMIT_CPUS");
if (str)

@@ -1048,7 +1033,7 @@ int main(int argc, char **argv)

switch(monitor) {
case UHYVE:
return uhyve_loop();
return uhyve_loop(argc, argv);

case BAREMETAL:
case QEMU:
@@ -28,6 +28,12 @@
#ifndef __PROXY_H__
#define __PROXY_H__

#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif

#include <unistd.h>
#include <stdlib.h>
#include <stdint.h>

#define HERMIT_ELFOSABI 0x42

@@ -40,6 +46,10 @@
#define __HERMIT_lseek 5

int uhyve_init(char *path);
int uhyve_loop(void);
int uhyve_loop(int argc, char **argv);

// define some helper functions
uint32_t get_cpufreq(void);
ssize_t pread_in_full(int fd, void *buf, size_t count, off_t offset);

#endif
207
tools/uhyve.c
207
tools/uhyve.c
|
@ -32,7 +32,7 @@
|
|||
* remove memory limit
|
||||
*/
|
||||
|
||||
#define _GNU_SOURCE
|
||||
#define _GNU_SOURCE
|
||||
|
||||
#include <unistd.h>
|
||||
#include <stdio.h>
|
||||
|
@ -46,6 +46,7 @@
|
|||
#include <signal.h>
|
||||
#include <limits.h>
|
||||
#include <pthread.h>
|
||||
#include <semaphore.h>
|
||||
#include <elf.h>
|
||||
#include <err.h>
|
||||
#include <poll.h>
|
||||
|
@@ -195,6 +196,30 @@ static pthread_barrier_t barrier;
static __thread struct kvm_run *run = NULL;
static __thread int vcpufd = -1;
static __thread uint32_t cpuid = 0;
static sem_t net_sem;

int uhyve_argc = -1;
int uhyve_envc = -1;
char **uhyve_argv = NULL;
extern char **environ;
char **uhyve_envp = NULL;

/* Ports and data structures for uhyve command line arguments and envp
 * forwarding */
#define UHYVE_PORT_CMDSIZE 0x509
#define UHYVE_PORT_CMDVAL 0x510

typedef struct {
    int argc;
    int argsz[MAX_ARGC_ENVC];
    int envc;
    int envsz[MAX_ARGC_ENVC];
} __attribute__ ((packed)) uhyve_cmdsize_t;

typedef struct {
    char **argv;
    char **envp;
} __attribute__ ((packed)) uhyve_cmdval_t;
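The two ports above implement a small host/guest handshake for forwarding the command line: the guest first requests the argument and environment sizes (UHYVE_PORT_CMDSIZE), allocates matching buffers, and then asks uhyve to copy the actual strings into them (UHYVE_PORT_CMDVAL). A minimal guest-side sketch of that sequence follows; it is not part of this commit, and outportl(), the mirrored struct definitions, and the identity-mapped guest-physical addresses are illustrative assumptions only.

    /* Hypothetical guest-side counterpart (sketch, not from this diff).
     * Assumes the guest mirrors uhyve_cmdsize_t/uhyve_cmdval_t and passes
     * guest-physical addresses, since uhyve adds guest_mem to the value. */
    #include <stdint.h>
    #include <stddef.h>

    static inline void outportl(uint16_t port, uint32_t value)
    {
        __asm volatile("outl %0, %1" :: "a"(value), "Nd"(port));
    }

    static void uhyve_get_cmdline(uhyve_cmdsize_t *sizes, uhyve_cmdval_t *vals)
    {
        // step 1: ask the host for argc/envc and the length of every string
        outportl(UHYVE_PORT_CMDSIZE, (uint32_t)(size_t)sizes);

        // step 2: after allocating sizes->argsz[i]/envsz[i] bytes per entry and
        // storing their guest-physical addresses in vals->argv/vals->envp, let
        // the host copy the strings into those buffers
        outportl(UHYVE_PORT_CMDVAL, (uint32_t)(size_t)vals);
    }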
@@ -445,6 +470,16 @@ static void uhyve_exit(void* arg)
    close_fd(&vcpufd);
}

static void dump_log(void)
{
    if (klog && verbose)
    {
        fputs("\nDump kernel log:\n", stderr);
        fputs("================\n", stderr);
        fprintf(stderr, "%s\n", klog);
    }
}

static void uhyve_atexit(void)
{
    uhyve_exit(NULL);
@@ -462,87 +497,13 @@ static void uhyve_atexit(void)
    if (vcpu_fds)
        free(vcpu_fds);

    if (klog && verbose)
    {
        fputs("\nDump kernel log:\n", stderr);
        fputs("================\n", stderr);
        fprintf(stderr, "%s\n", klog);
    }
    dump_log();

    // clean up and close KVM
    close_fd(&vmfd);
    close_fd(&kvm);
}

static uint32_t get_cpufreq(void)
{
    char line[128];
    uint32_t freq = 0;
    char* match;

    FILE* fp = fopen("/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq", "r");
    if (fp != NULL) {
        if (fgets(line, sizeof(line), fp) != NULL) {
            // cpuinfo_max_freq is in kHz
            freq = (uint32_t) atoi(line) / 1000;
        }

        fclose(fp);
    } else if( (fp = fopen("/proc/cpuinfo", "r")) ) {
        // Resorting to /proc/cpuinfo, however on most systems this will only
        // return the current frequency that might change over time.
        // Currently only needed when running inside a VM

        // read until we find the line indicating cpu frequency
        while(fgets(line, sizeof(line), fp) != NULL) {
            match = strstr(line, "cpu MHz");

            if(match != NULL) {
                // advance pointer to beginning of number
                while( ((*match < '0') || (*match > '9')) && (*match != '\0') )
                    match++;

                freq = (uint32_t) atoi(match);
                break;
            }
        }

        fclose(fp);
    }

    return freq;
}

static ssize_t pread_in_full(int fd, void *buf, size_t count, off_t offset)
{
    ssize_t total = 0;
    char *p = buf;

    if (count > SSIZE_MAX) {
        errno = E2BIG;
        return -1;
    }

    while (count > 0) {
        ssize_t nr;

        nr = pread(fd, p, count, offset);
        if (nr == 0)
            return total;
        else if (nr == -1 && errno == EINTR)
            continue;
        else if (nr == -1)
            return -1;

        count -= nr;
        total += nr;
        p += nr;
        offset += nr;
    }

    return total;
}

static int load_kernel(uint8_t* mem, char* path)
{
    Elf64_Ehdr hdr;
@@ -656,9 +617,7 @@ static int load_kernel(uint8_t* mem, char* path)
            *((uint8_t*) (mem+paddr-GUEST_OFFSET + 0xBB)) = (uint8_t) ip[3];
        }

        // TODO: Compiler Warning
        *((uint64_t*) (mem+paddr-GUEST_OFFSET + 0xBC)) = guest_mem; // host-virtual start address (kernel_start_host)

        *((uint64_t*) (mem+paddr-GUEST_OFFSET + 0xbc)) = guest_mem; // host-virtual start address (kernel_start_host)
    }
    *((uint64_t*) (mem+paddr-GUEST_OFFSET + 0x38)) += memsz; // total kernel size
}
@@ -989,6 +948,7 @@ static void* wait_for_packet(void* arg)
        else if (ret) {
            uint64_t event_counter = 1;
            write(efd, &event_counter, sizeof(event_counter));
            sem_wait(&net_sem);
        }
    }

@@ -1006,6 +966,8 @@ static inline void check_network(void)
    irqfd.gsi = UHYVE_IRQ;
    kvm_ioctl(vmfd, KVM_IRQFD, &irqfd);

    sem_init(&net_sem, 0, 0);

    if (pthread_create(&net_thread, NULL, wait_for_packet, NULL))
        err(1, "unable to create thread");
}
@@ -1130,7 +1092,10 @@ static int vcpu_loop(void)
            if (ret > 0) {
                uhyve_netread->len = ret;
                uhyve_netread->ret = 0;
            } else uhyve_netread->ret = -1;
        } else {
            uhyve_netread->ret = -1;
            sem_post(&net_sem);
        }
        break;
    }
@@ -1153,6 +1118,41 @@ static int vcpu_loop(void)
        break;
    }

    case UHYVE_PORT_CMDSIZE: {
        int i;
        unsigned data = *((unsigned*)((size_t)run+run->io.data_offset));
        uhyve_cmdsize_t *val = (uhyve_cmdsize_t *) (guest_mem+data);

        val->argc = uhyve_argc;
        for(i=0; i<uhyve_argc; i++)
            val->argsz[i] = strlen(uhyve_argv[i]) + 1;

        val->envc = uhyve_envc;
        for(i=0; i<uhyve_envc; i++)
            val->envsz[i] = strlen(uhyve_envp[i]) + 1;

        break;
    }

    case UHYVE_PORT_CMDVAL: {
        int i;
        char **argv_ptr, **env_ptr;
        unsigned data = *((unsigned*)((size_t)run+run->io.data_offset));
        uhyve_cmdval_t *val = (uhyve_cmdval_t *) (guest_mem+data);

        /* argv */
        argv_ptr = (char **)(guest_mem + (size_t)val->argv);
        for(i=0; i<uhyve_argc; i++)
            strcpy(guest_mem + (size_t)argv_ptr[i], uhyve_argv[i]);

        /* env */
        env_ptr = (char **)(guest_mem + (size_t)val->envp);
        for(i=0; i<uhyve_envc; i++)
            strcpy(guest_mem + (size_t)env_ptr[i], uhyve_envp[i]);

        break;
    }

    case UHYVE_PORT_KERNEL_IBV_LOG: {
        unsigned data = *((unsigned*)((size_t)run+run->io.data_offset));
        char* str = (char*) (guest_mem + data);
@@ -1198,11 +1198,13 @@ static int vcpu_loop(void)
        break;

    case KVM_EXIT_SHUTDOWN:
        err(1, "KVM: receive shutdown command\n");
        break;
        fprintf(stderr, "KVM: receive shutdown command\n");

    case KVM_EXIT_DEBUG:
        print_registers();
        dump_log();
        exit(EXIT_FAILURE);

    default:
        fprintf(stderr, "KVM: unhandled exit: exit_reason = 0x%x\n", run->exit_reason);
        exit(EXIT_FAILURE);
@@ -1288,9 +1290,21 @@ static int vcpu_init(void)
        kvm_ioctl(vcpufd, KVM_SET_XSAVE, &xsave);
        kvm_ioctl(vcpufd, KVM_SET_VCPU_EVENTS, &events);
    } else {
        struct {
            struct kvm_msrs info;
            struct kvm_msr_entry entries[MAX_MSR_ENTRIES];
        } msr_data;
        struct kvm_msr_entry *msrs = msr_data.entries;

        // be sure that the multiprocessor is runnable
        kvm_ioctl(vcpufd, KVM_SET_MP_STATE, &mp_state);

        // enable fast string operations
        msrs[0].index = MSR_IA32_MISC_ENABLE;
        msrs[0].data = 1;
        msr_data.info.nmsrs = 1;
        kvm_ioctl(vcpufd, KVM_SET_MSRS, &msr_data);

    /* Setup registers and memory. */
    setup_system(vcpufd, guest_mem, cpuid);
    kvm_ioctl(vcpufd, KVM_SET_REGS, &regs);
@@ -1792,10 +1806,35 @@ nextslot:
    no_checkpoint++;
}

int uhyve_loop(void)
int uhyve_loop(int argc, char **argv)
{
    const char* hermit_check = getenv("HERMIT_CHECKPOINT");
    int ts = 0;
    int ts = 0, i = 0;

    /* argv[0] is 'proxy', do not count it */
    uhyve_argc = argc-1;
    uhyve_argv = &argv[1];
    uhyve_envp = environ;
    while(uhyve_envp[i] != NULL)
        i++;
    uhyve_envc = i;

    if (uhyve_argc > MAX_ARGC_ENVC) {
        fprintf(stderr, "uhyve downsizes argc from %d to %d\n", uhyve_argc, MAX_ARGC_ENVC);
        uhyve_argc = MAX_ARGC_ENVC;
    }

    if (uhyve_envc > MAX_ARGC_ENVC-1) {
        fprintf(stderr, "uhyve downsizes envc from %d to %d\n", uhyve_envc, MAX_ARGC_ENVC-1);
        uhyve_envc = MAX_ARGC_ENVC-1;
    }

    if(uhyve_argc > MAX_ARGC_ENVC || uhyve_envc > MAX_ARGC_ENVC) {
        fprintf(stderr, "uhyve cannot forward more than %d command line "
            "arguments or environment variables, please consider increasing "
            "the MAX_ARGC_ENVC cmake argument\n", MAX_ARGC_ENVC);
        return -1;
    }

    if (hermit_check)
        ts = atoi(hermit_check);

171 tools/utils.c (new file)
@@ -0,0 +1,171 @@
/*
 * Copyright (c) 2017, Stefan Lankes, RWTH Aachen University
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *    * Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    * Redistributions in binary form must reproduce the above copyright
 *      notice, this list of conditions and the following disclaimer in the
 *      documentation and/or other materials provided with the distribution.
 *    * Neither the name of the University nor the names of its contributors
 *      may be used to endorse or promote products derived from this
 *      software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#define _GNU_SOURCE

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <limits.h>

#include "proxy.h"

inline static void __cpuid(uint32_t code, uint32_t* a, uint32_t* b, uint32_t* c, uint32_t* d)
{
    __asm volatile ("cpuid" : "=a"(*a), "=b"(*b), "=c"(*c), "=d"(*d) : "0"(code), "2"(*c));
}

// Try to determine the frequency from the CPU brand.
// Code is derived from the manual "Intel Processor
// Identification and the CPUID Instruction".
static uint32_t get_frequency_from_brand(void)
{
    char cpu_brand[4*3*sizeof(uint32_t)+1] = {[0 ... 4*3*sizeof(uint32_t)] = 0};
    uint32_t* bint = (uint32_t*) cpu_brand;
    uint32_t index, multiplier = 0;
    uint32_t cpu_freq = 0;
    uint32_t extended;

    __cpuid(0x80000000, &extended, bint+1, bint+2, bint+3);
    if (extended < 0x80000004)
        return 0;

    __cpuid(0x80000002, bint+0, bint+1, bint+2, bint+3);
    __cpuid(0x80000003, bint+4, bint+5, bint+6, bint+7);
    __cpuid(0x80000004, bint+8, bint+9, bint+10, bint+11);

    for(index=0; index<sizeof(cpu_brand)-2; index++)
    {
        if ((cpu_brand[index+1] == 'H') && (cpu_brand[index+2] == 'z'))
        {
            if (cpu_brand[index] == 'M')
                multiplier = 1;
            else if (cpu_brand[index] == 'G')
                multiplier = 1000;
            else if (cpu_brand[index] == 'T')
                multiplier = 1000000;
        }

        if (multiplier > 0) {
            uint32_t freq;

            // Compute frequency (in MHz) from brand string
            if (cpu_brand[index-3] == '.') { // If format is “x.xx”
                freq = (uint32_t)(cpu_brand[index-4] - '0') * multiplier;
                freq += (uint32_t)(cpu_brand[index-2] - '0') * (multiplier / 10);
                freq += (uint32_t)(cpu_brand[index-1] - '0') * (multiplier / 100);
            } else { // If format is xxxx
                freq = (uint32_t)(cpu_brand[index-4] - '0') * 1000;
                freq += (uint32_t)(cpu_brand[index-3] - '0') * 100;
                freq += (uint32_t)(cpu_brand[index-2] - '0') * 10;
                freq += (uint32_t)(cpu_brand[index-1] - '0');
                freq *= multiplier;
            }

            return freq;
        }
    }

    return 0;
}
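As a quick sanity check of the parsing above (the brand strings are illustrative examples, not taken from this commit): a brand ending in "2.40GHz" selects multiplier = 1000 and the "x.xx" branch, giving 2*1000 + 4*(1000/10) + 0*(1000/100) = 2400 MHz, while a brand ending in "3600MHz" selects multiplier = 1 and the "xxxx" branch, giving (3*1000 + 6*100 + 0*10 + 0) * 1 = 3600 MHz.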

uint32_t get_cpufreq(void)
{
    char line[128];
    uint32_t freq = 0;
    char* match;

    freq = get_frequency_from_brand();
    if (freq > 0)
        return freq;

    // TODO: fallback solution; on some systems cpuinfo_max_freq reports the
    // turbo frequency => wrong value
    FILE* fp = fopen("/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq", "r");
    if (fp != NULL) {
        if (fgets(line, sizeof(line), fp) != NULL) {
            // cpuinfo_max_freq is in kHz
            freq = (uint32_t) atoi(line) / 1000;
        }

        fclose(fp);
    } else if( (fp = fopen("/proc/cpuinfo", "r")) ) {
        // Resort to /proc/cpuinfo; however, on most systems this only
        // returns the current frequency, which might change over time.
        // Currently only needed when running inside a VM

        // read until we find the line indicating cpu frequency
        while(fgets(line, sizeof(line), fp) != NULL) {
            match = strstr(line, "cpu MHz");

            if(match != NULL) {
                // advance pointer to beginning of number
                while( ((*match < '0') || (*match > '9')) && (*match != '\0') )
                    match++;

                freq = (uint32_t) atoi(match);
                break;
            }
        }

        fclose(fp);
    }

    return freq;
}

ssize_t pread_in_full(int fd, void *buf, size_t count, off_t offset)
{
    ssize_t total = 0;
    char *p = buf;

    if (count > SSIZE_MAX) {
        errno = E2BIG;
        return -1;
    }

    while (count > 0) {
        ssize_t nr;

        nr = pread(fd, p, count, offset);
        if (nr == 0)
            return total;
        else if (nr == -1 && errno == EINTR)
            continue;
        else if (nr == -1)
            return -1;

        count -= nr;
        total += nr;
        p += nr;
        offset += nr;
    }

    return total;
}
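pread_in_full() keeps calling pread() until the requested region is complete, returning early only on EOF or a real error, so a caller can treat a short return value as truncation. A minimal usage sketch (hypothetical, loosely mirroring how the loader reads the ELF header; the helper name is not from this diff):

    // Hypothetical helper (sketch): read the ELF header of an already-open
    // image file descriptor, treating a short read as an error.
    static int read_elf_header(int fd, Elf64_Ehdr *hdr)
    {
        if (pread_in_full(fd, hdr, sizeof(*hdr), 0) != (ssize_t)sizeof(*hdr))
            return -1;   // short read (EOF) or I/O error
        return 0;
    }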

usr/tests/CMakeLists.txt

@@ -5,6 +5,7 @@ project(hermit_tests C CXX Fortran Go)

add_executable(hello hello.c)
add_executable(jacobi jacobi.c)
add_executable(argv_envp argv_envp.c)
add_executable(hello++ hello++.cpp)
add_executable(hellof hellof.f90)
add_executable(pi pi.go)

21 usr/tests/argv_envp.c (new file)
@@ -0,0 +1,21 @@
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

extern char **environ;

int main(int argc, char **argv) {
    int i;

    printf("argc: %d\n", argc);
    for(i=0; i<argc; i++) {
        printf(" argv[%d]: %s\n", i, argv[i]);
    }

    printf("environ: %p\n", (void *)environ);
    i = 0;
    while(environ[i] != NULL)
        printf("%s\n", environ[i++]);

    return 0;
}
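When the forwarding works, this test should see the arguments passed after the application name on the proxy command line plus the host environment copied into the guest. As a usage illustration (the invocation and the HERMIT_ISLE variable reflect the usual HermitCore proxy workflow and are assumptions, not part of this diff): running something like HERMIT_ISLE=uhyve proxy argv_envp foo bar should print argc: 3, the three argv entries, and then every forwarded environment string.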