1
0
Fork 0
mirror of https://github.com/hermitcore/libhermit.git synced 2025-03-09 00:00:03 +01:00

working on ping pong start

This commit is contained in:
Annika Wierichs 2017-11-15 17:37:29 +01:00
parent 77aea478fb
commit 205ee63d6e
11 changed files with 110 additions and 126 deletions

View file

@ -75,6 +75,7 @@ align 4
global hcip
global hcgateway
global hcmask
global kernel_start_host
base dq 0
limit dq 0
cpu_freq dd 0
@ -105,7 +106,7 @@ align 4
hcip db 10,0,5,2
hcgateway db 10,0,5,1
hcmask db 255,255,255,0
kernel_start_host dq 0
; Bootstrap page tables are used during the initialization.
align 4096
boot_pml4:

View file

@ -250,9 +250,6 @@ int memory_init(void)
{
int ret = 0;
// guest_mem_workaround
uhyve_send(UHYVE_PORT_KERNEL_START, (unsigned) guest_to_host((size_t) &host_kernel_start));
// enable paging and map Multiboot modules etc.
ret = page_init();
if (BUILTIN_EXPECT(ret, 0)) {

View file

@ -28,14 +28,16 @@
*
*/
#ifndef __IBV_GUEST_HOST_H__
#define __IBV_GUEST_HOST_H__
#include <hermit/verbs.h>
extern uint8_t * host_kernel_start;
extern uint8_t * kernel_start_host;
inline size_t guest_to_host(size_t address) {
return virt_to_phys(address) + host_kernel_start;
return virt_to_phys(address) + (size_t) kernel_start_host;
}
@ -43,6 +45,7 @@ struct ibv_device * guest_to_host_ibv_device(struct ibv_device * device);
struct ibv_context * guest_to_host_ibv_context(struct ibv_context * context);
struct ibv_context_ops * guest_to_host_ibv_context_ops(struct ibv_context_ops * context_ops);
struct ibv_port_attr * guest_to_host_ibv_port_attr(struct ibv_port_attr * port_attr);
struct ibv_comp_channel * guest_to_host_ibv_comp_channel(struct ibv_comp_channel * channel);
struct ibv_abi_compat_v2 * guest_to_host_ibv_abi_compat_v2(struct ibv_abi_compat_v2 * abi_compat);
void host_to_guest_ibv_device(struct ibv_device * device);
@ -51,3 +54,4 @@ void host_to_guest_ibv_context_ops(struct ibv_context_ops * context_ops);
void host_to_guest_ibv_port_attr(struct ibv_port_attr * port_attr);
void host_to_guest_ibv_abi_compat_v2(struct ibv_abi_compat_v2 * abi_compat);
#endif // __IBV_GUEST_HOST_H__

View file

@ -66,14 +66,12 @@ extern const size_t image_size;
#define UHYVE_PORT_EXIT 0x503
#define UHYVE_PORT_LSEEK 0x504
#define UHYVE_PORT_KERNEL_START 0x505
// InfiniBand uhyve port IDs
#define UHYVE_PORT_IBV_OPEN_DEVICE 0x510,
//#define UHYVE_PORT_IBV_GET_DEVICE_LIST 0x511,
#define UHYVE_PORT_IBV_GET_DEVICE_NAME 0x512,
#define UHYVE_PORT_IBV_QUERY_PORT 0x513,
#define UHYVE_PORT_IBV_CREATE_COMP_CHANNEL 0x514,
#define UHYVE_PORT_IBV_OPEN_DEVICE 0x510
//#define UHYVE_PORT_IBV_GET_DEVICE_LIST 0x511
#define UHYVE_PORT_IBV_GET_DEVICE_NAME 0x512
#define UHYVE_PORT_IBV_QUERY_PORT 0x513
#define UHYVE_PORT_IBV_CREATE_COMP_CHANNEL 0x514
#define BUILTIN_EXPECT(exp, b) __builtin_expect((exp), (b))

View file

@ -114,6 +114,8 @@ struct timespec {
__kernel_time_t tv_sec; /* seconds */
long tv_nsec; /* nanoseconds */
};
#endif
@ -1122,6 +1124,14 @@ struct ibv_comp_channel {
int refcnt;
};
#ifdef __KERNEL__
// Only in ibverbs.h
struct ibv_abi_compat_v2 {
struct ibv_comp_channel channel;
pthread_mutex_t in_use;
};
#endif // __KERNEL__
struct ibv_cq {
struct ibv_context *context;
struct ibv_comp_channel *channel;

View file

@ -36,7 +36,7 @@
#include <hermit/stdlib.h>
#include <hermit/ibv.h>
#include <hermit/ibv_virt_phys.h>
#include <hermit/ibv_guest_host.h>
// TODO: Can/should we separate ibv_get_device_list into two KVM exit IOs to

View file

@ -31,42 +31,19 @@
#include <asm/page.h>
#include <hermit/ibv_virt_phys.h>
#include <hermit/ibv_guest_host.h>
/*
* struct ibv_device
*/
static struct {
char * name;
char * dev_name;
char * dev_path;
char * ibdev_path;
} ibv_device_virt_ptrs; // TODO: Proper naming
struct ibv_device * guest_to_host_ibv_device(struct ibv_device * device) {
ibv_device_virt_ptrs.name = device->name;
ibv_device_virt_ptrs.dev_name = device->dev_name;
ibv_device_virt_ptrs.dev_path = device->dev_path;
ibv_device_virt_ptrs.ibdev_path = device->ibdev_path;
device->name = (char *) guest_to_host((size_t) device->name);
device->dev_name = (char *) guest_to_host((size_t) device->dev_name);
device->dev_path = (char *) guest_to_host((size_t) device->dev_path);
device->ibdev_path = (char *) guest_to_host((size_t) device->ibdev_path);
// _ops obsolete.
return (struct ibv_device *) guest_to_host((size_t) device);
}
void host_to_guest_ibv_device(struct ibv_device * device) {
device->name = ibv_device_virt_ptrs.name;
device->dev_name = ibv_device_virt_ptrs.dev_name;
device->dev_path = ibv_device_virt_ptrs.dev_path;
device->ibdev_path = ibv_device_virt_ptrs.ibdev_path;
// _ops obsolete.
}
void host_to_guest_ibv_device(struct ibv_device * device) { }
/*
@ -140,38 +117,38 @@ static struct {
} ibv_context_ops_virt_ptrs;
struct ibv_context_ops * guest_to_host_ibv_context_ops(struct ibv_context_ops * context_ops) {
ibv_context_virt_ptrs.query_device = context_ops->query_device;
ibv_context_virt_ptrs.query_port = context_ops->query_port;
ibv_context_virt_ptrs.alloc_pd = context_ops->alloc_pd;
ibv_context_virt_ptrs.dealloc_pd = context_ops->dealloc_pd;
ibv_context_virt_ptrs.reg_mr = context_ops->reg_mr;
ibv_context_virt_ptrs.rereg_mr = context_ops->rereg_mr;
ibv_context_virt_ptrs.dereg_mr = context_ops->dereg_mr;
ibv_context_virt_ptrs.alloc_mw = context_ops->alloc_mw;
ibv_context_virt_ptrs.bind_mw = context_ops->bind_mw;
ibv_context_virt_ptrs.dealloc_mw = context_ops->dealloc_mw;
ibv_context_virt_ptrs.create_cq = context_ops->create_cq;
ibv_context_virt_ptrs.poll_cq = context_ops->poll_cq;
ibv_context_virt_ptrs.req_notify_cq = context_ops->req_notify_cq;
ibv_context_virt_ptrs.cq_event = context_ops->cq_event;
ibv_context_virt_ptrs.resize_cq = context_ops->resize_cq;
ibv_context_virt_ptrs.destroy_cq = context_ops->destroy_cq;
ibv_context_virt_ptrs.create_srq = context_ops->create_srq;
ibv_context_virt_ptrs.modify_srq = context_ops->modify_srq;
ibv_context_virt_ptrs.query_srq = context_ops->query_srq;
ibv_context_virt_ptrs.destroy_srq = context_ops->destroy_srq;
ibv_context_virt_ptrs.post_srq_recv = context_ops->post_srq_recv;
ibv_context_virt_ptrs.create_qp = context_ops->create_qp;
ibv_context_virt_ptrs.query_qp = context_ops->query_qp;
ibv_context_virt_ptrs.modify_qp = context_ops->modify_qp;
ibv_context_virt_ptrs.destroy_qp = context_ops->destroy_qp;
ibv_context_virt_ptrs.post_send = context_ops->post_send;
ibv_context_virt_ptrs.post_recv = context_ops->post_recv;
ibv_context_virt_ptrs.create_ah = context_ops->create_ah;
ibv_context_virt_ptrs.destroy_ah = context_ops->destroy_ah;
ibv_context_virt_ptrs.attach_mcast = context_ops->attach_mcast;
ibv_context_virt_ptrs.detach_mcast = context_ops->detach_mcast;
ibv_context_virt_ptrs.async_event = context_ops->async_event;
ibv_context_ops_virt_ptrs.query_device = context_ops->query_device;
ibv_context_ops_virt_ptrs.query_port = context_ops->query_port;
ibv_context_ops_virt_ptrs.alloc_pd = context_ops->alloc_pd;
ibv_context_ops_virt_ptrs.dealloc_pd = context_ops->dealloc_pd;
ibv_context_ops_virt_ptrs.reg_mr = context_ops->reg_mr;
ibv_context_ops_virt_ptrs.rereg_mr = context_ops->rereg_mr;
ibv_context_ops_virt_ptrs.dereg_mr = context_ops->dereg_mr;
ibv_context_ops_virt_ptrs.alloc_mw = context_ops->alloc_mw;
ibv_context_ops_virt_ptrs.bind_mw = context_ops->bind_mw;
ibv_context_ops_virt_ptrs.dealloc_mw = context_ops->dealloc_mw;
ibv_context_ops_virt_ptrs.create_cq = context_ops->create_cq;
ibv_context_ops_virt_ptrs.poll_cq = context_ops->poll_cq;
ibv_context_ops_virt_ptrs.req_notify_cq = context_ops->req_notify_cq;
ibv_context_ops_virt_ptrs.cq_event = context_ops->cq_event;
ibv_context_ops_virt_ptrs.resize_cq = context_ops->resize_cq;
ibv_context_ops_virt_ptrs.destroy_cq = context_ops->destroy_cq;
ibv_context_ops_virt_ptrs.create_srq = context_ops->create_srq;
ibv_context_ops_virt_ptrs.modify_srq = context_ops->modify_srq;
ibv_context_ops_virt_ptrs.query_srq = context_ops->query_srq;
ibv_context_ops_virt_ptrs.destroy_srq = context_ops->destroy_srq;
ibv_context_ops_virt_ptrs.post_srq_recv = context_ops->post_srq_recv;
ibv_context_ops_virt_ptrs.create_qp = context_ops->create_qp;
ibv_context_ops_virt_ptrs.query_qp = context_ops->query_qp;
ibv_context_ops_virt_ptrs.modify_qp = context_ops->modify_qp;
ibv_context_ops_virt_ptrs.destroy_qp = context_ops->destroy_qp;
ibv_context_ops_virt_ptrs.post_send = context_ops->post_send;
ibv_context_ops_virt_ptrs.post_recv = context_ops->post_recv;
ibv_context_ops_virt_ptrs.create_ah = context_ops->create_ah;
ibv_context_ops_virt_ptrs.destroy_ah = context_ops->destroy_ah;
ibv_context_ops_virt_ptrs.attach_mcast = context_ops->attach_mcast;
ibv_context_ops_virt_ptrs.detach_mcast = context_ops->detach_mcast;
ibv_context_ops_virt_ptrs.async_event = context_ops->async_event;
// TODO: Does this work? Fcn returns size_t. Have to convert?
context_ops->query_device = guest_to_host((size_t) context_ops->query_device);
@ -257,6 +234,29 @@ struct ibv_port_attr * guest_to_host_ibv_port_attr(struct ibv_port_attr * port_a
void host_to_guest_ibv_port_attr(struct ibv_port_attr * port_attr) {}
/*
* struct ibv_comp_channel
*/
static struct {
struct ibv_context * context;
} ibv_comp_channel_virt_ptrs;
struct ibv_comp_channel * guest_to_host_ibv_comp_channel(struct ibv_comp_channel * channel) {
ibv_comp_channel_virt_ptrs.context = channel->context;
channel->context = guest_to_host_ibv_context(channel->context);
return (struct ibv_comp_channel *) guest_to_host((size_t) channel);
}
void host_to_guest_ibv_comp_channel(struct ibv_comp_channel * channel) {
channel->context = ibv_comp_channel_virt_ptrs.context;
host_to_guest_ibv_context(channel->context);
}
/*
* struct ibv_abi_compat_v2
*/
@ -272,29 +272,3 @@ void host_to_guest_ibv_abi_compat_v2(struct ibv_abi_compat_v2 * abi_compat) {
host_to_guest_ibv_comp_channel(&abi_compat->channel);
host_to_guest_pthread_mutex_t(&abi_compat->in_use);
}
/*
* struct ibv_comp_channel
*/
static struct {
struct ibv_context * context;
} ibv_comp_channel_virt_ptrs;
struct ibv_comp_channel * guest_to_host_ibv_comp_channel(struct ibv_comp_channel * channel) {
ibv_comp_channel_virt_ptrs.context = channel->context;
channel->context = guest_to_host_ibv_device(channel->context);
return (struct ibv_comp_channel *) guest_to_host((size_t) channel);
}
void host_to_guest_ibv_comp_channel(struct ibv_comp_channel * channel) {
channel->context = ibv_comp_channel_virt_ptrs.context;
host_to_guest_ibv_device(channel->context);
}

View file

@ -5,7 +5,7 @@ include(../cmake/HermitCore-Paths.cmake)
add_compile_options(-std=c99)
add_executable(proxy proxy.c uhyve.c uhyve-net.c)
add_executable(proxy proxy.c uhyve.c uhyve-ibv.c uhyve-net.c)
target_compile_options(proxy PUBLIC -pthread)
target_link_libraries(proxy pthread ibverbs)

View file

@ -329,7 +329,7 @@ def generate_port_macros(function_names):
"""
macros = ""
for num, function_name in enumerate(function_names, PORT_NUMBER_START):
macros += "\n#define UHYVE_PORT_{0} 0x{1},".format(function_name.upper(),
macros += "\n#define UHYVE_PORT_{0} 0x{1}".format(function_name.upper(),
format(num, "X"))
return macros

View file

@ -24,8 +24,6 @@
#define MAX_NUM_OF_IBV_DEVICES 16
typedef enum {
UHYVE_PORT_KERNEL_START = 0x505,
UHYVE_PORT_IBV_OPEN_DEVICE = 0x510,
//UHYVE_PORT_IBV_GET_DEVICE_LIST = 0x511,
UHYVE_PORT_IBV_GET_DEVICE_NAME = 0x512,
@ -78,3 +76,12 @@ typedef struct {
//struct ibv_device *dev_phys_ptr_list[MAX_NUM_OF_IBV_DEVICES];
////struct ibv_device **device_list;
//} __attribute__((packed)) uhyve_ibv_get_device_list_t;
void call_ibv_open_device(struct kvm_run * run);
void call_ibv_get_device_name(struct kvm_run * run);
void call_ibv_query_port(struct kvm_run * run);
void call_ibv_create_comp_channel(struct kvm_run * run);
/*void call_ibv_get_device_list(struct kvm_run * run, uint8_t * guest_mem);*/
#endif // UHYVE_IBV_H

View file

@ -431,14 +431,14 @@ static int load_kernel(uint8_t* mem, char* path)
if (first_load) {
first_load = 0;
// initialize kernel
*((uint64_t*) (mem+paddr-GUEST_OFFSET + 0x08)) = paddr; // physical start address
*((uint64_t*) (mem+paddr-GUEST_OFFSET + 0x10)) = guest_size; // physical limit
*((uint32_t*) (mem+paddr-GUEST_OFFSET + 0x18)) = get_cpufreq();
*((uint32_t*) (mem+paddr-GUEST_OFFSET + 0x24)) = 1; // number of used cpus
*((uint32_t*) (mem+paddr-GUEST_OFFSET + 0x30)) = 0; // apicid
*((uint32_t*) (mem+paddr-GUEST_OFFSET + 0x60)) = 1; // numa nodes
*((uint32_t*) (mem+paddr-GUEST_OFFSET + 0x94)) = 1; // announce uhyve
// initialize kernel (corresponding asm vars given in parenthesis)
*((uint64_t*) (mem+paddr-GUEST_OFFSET + 0x08)) = paddr; // physical start address (base)
*((uint64_t*) (mem+paddr-GUEST_OFFSET + 0x10)) = guest_size; // physical limit (limit)
*((uint32_t*) (mem+paddr-GUEST_OFFSET + 0x18)) = get_cpufreq(); // (cpu_freq)
*((uint32_t*) (mem+paddr-GUEST_OFFSET + 0x24)) = 1; // number of used cpus (possible_cpus)
*((uint32_t*) (mem+paddr-GUEST_OFFSET + 0x30)) = 0; // apicid (current_boot_id)
*((uint32_t*) (mem+paddr-GUEST_OFFSET + 0x60)) = 1; // numa nodes (possible_isles)
*((uint32_t*) (mem+paddr-GUEST_OFFSET + 0x94)) = 1; // announce uhyve (uhyve)
char* str = getenv("HERMIT_IP");
@ -473,6 +473,9 @@ static int load_kernel(uint8_t* mem, char* path)
*((uint8_t*) (mem+paddr-GUEST_OFFSET + 0xBB)) = (uint8_t) ip[3];
}
// TODO: Compiler Warning
*((uint64_t*) (mem+paddr-GUEST_OFFSET + 0xBC)) = guest_mem; // host-virtual start address (kernel_start_host)
}
*((uint64_t*) (mem+paddr-GUEST_OFFSET + 0x38)) += memsz; // total kernel size
}
@ -868,13 +871,6 @@ static int vcpu_loop(void)
case KVM_EXIT_IO:
//printf("port 0x%x\n", run->io.port);
switch (run->io.port) {
case UHYVE_PORT_KERNEL_START: {
unsigned data = *((unsigned*)((size_t)run+run->io.data_offset));
uint8_t ** ret = (uint8_t **) (guest_mem+data);
&ret = guest_mem;
printf("Guest mem in uhyve: %p", guest_mem);
break;
}
case UHYVE_PORT_WRITE: {
unsigned data = *((unsigned*)((size_t)run+run->io.data_offset));
@ -976,20 +972,17 @@ static int vcpu_loop(void)
// InfiniBand
case UHYVE_PORT_IBV_OPEN_DEVICE:
call_ibv_open_device(run, guest_mem);
call_ibv_open_device(run);
break;
case UHYVE_PORT_IBV_GET_DEVICE_NAME:
call_ibv_get_device_name(run, guest_mem);
call_ibv_get_device_name(run);
break;
case UHYVE_PORT_IBV_QUERY_PORT:
call_ibv_query_port(run, guest_mem);
call_ibv_query_port(run);
break;
case UHYVE_PORT_IBV_CREATE_COMP_CHANNEL:
call_ibv_create_comp_channel(run, guest_mem);
call_ibv_create_comp_channel(run);
break;
/*case UHYVE_PORT_IBV_GET_DEVICE_LIST:*/
/*call_ibv_get_device_list(run, guest_mem);*/
/*break;*/
default:
err(1, "KVM: unhandled KVM_EXIT_IO at port 0x%x, direction %d\n", run->io.port, run->io.direction);
@ -1312,9 +1305,9 @@ int uhyve_init(char *path)
if (guest_mem == MAP_FAILED)
err(1, "mmap failed");
/*
* We mprotect the gap PROT_NONE so that if we accidentally write to it, we will know.
*/
/*
* We mprotect the gap PROT_NONE so that if we accidentally write to it, we will know.
*/
mprotect(guest_mem + KVM_32BIT_GAP_START, KVM_32BIT_GAP_SIZE, PROT_NONE);
}