From 69f2b1cef58ec30da2a9c41716280c4e441df605 Mon Sep 17 00:00:00 2001
From: Annika Wierichs
Date: Fri, 10 Nov 2017 15:54:19 +0100
Subject: [PATCH] Started writing hypervisor part.

---
 ...sion.h => ibv_struct_address_conversion.h} |  13 +-
 kernel/ibv.c                                  |   9 +-
 kernel/ibv_struct_address_conversion.c        | 259 ++++++++++++++++++
 tools/uhyve-ibv.c                             |  23 +-
 4 files changed, 292 insertions(+), 12 deletions(-)
 rename include/hermit/{ibv_struct_member_address_conversion.h => ibv_struct_address_conversion.h} (74%)
 create mode 100644 kernel/ibv_struct_address_conversion.c

diff --git a/include/hermit/ibv_struct_member_address_conversion.h b/include/hermit/ibv_struct_address_conversion.h
similarity index 74%
rename from include/hermit/ibv_struct_member_address_conversion.h
rename to include/hermit/ibv_struct_address_conversion.h
index e595460dd..bdd97c6d1 100644
--- a/include/hermit/ibv_struct_member_address_conversion.h
+++ b/include/hermit/ibv_struct_address_conversion.h
@@ -32,8 +32,13 @@
 
 #include 
 
-void virt_to_phys_ibv_device(struct ibv_device * device);
-void virt_to_phys_ibv_context(struct ibv_contect * context);
-void virt_to_phys_ibv_context_ops(struct ibv_context_ops * context_ops);
-void virt_to_phys_ibv_port_attr(struct ibv_port_attr * port_attr);
+struct ibv_device * virt_to_phys_ibv_device(struct ibv_device * device);
+struct ibv_context * virt_to_phys_ibv_context(struct ibv_context * context);
+struct ibv_context_ops * virt_to_phys_ibv_context_ops(struct ibv_context_ops * context_ops);
+struct ibv_port_attr * virt_to_phys_ibv_port_attr(struct ibv_port_attr * port_attr);
+
+void phys_to_virt_ibv_device(struct ibv_device * device);
+void phys_to_virt_ibv_context(struct ibv_context * context);
+void phys_to_virt_ibv_context_ops(struct ibv_context_ops * context_ops);
+void phys_to_virt_ibv_port_attr(struct ibv_port_attr * port_attr);
 
diff --git a/kernel/ibv.c b/kernel/ibv.c
index ef0a766d0..76b8f902d 100644
--- a/kernel/ibv.c
+++ b/kernel/ibv.c
@@ -35,8 +35,9 @@
 #include 
 #include 
-#include  // GEHT
-#include 
+#include 
+#include 
+
 
 // TODO: Can/should we separate ibv_get_device_list into two KVM exit IOs to
 // allocate the right amount of memory?
 
@@ -129,8 +130,8 @@ typedef struct {
 int ibv_query_port(struct ibv_context * context, uint8_t port_num, struct ibv_port_attr * port_attr) {
 	uhyve_ibv_query_port_t uhyve_args;
-	uhyve_args->context = virt_to_phys_ibv_context(context);
-	uhyve_args->port_num = port_num;
+	uhyve_args->context   = virt_to_phys_ibv_context(context);
+	uhyve_args->port_num  = port_num;
 	uhyve_args->port_attr = virt_to_phys_ibv_port_attr(port_attr);
 
 	uhyve_send(UHYVE_PORT_IBV_QUERY_PORT, (unsigned) virt_to_phys((size_t) &uhyve_args));
 
diff --git a/kernel/ibv_struct_address_conversion.c b/kernel/ibv_struct_address_conversion.c
new file mode 100644
index 000000000..eb7bcee56
--- /dev/null
+++ b/kernel/ibv_struct_address_conversion.c
@@ -0,0 +1,259 @@
+/*
+ * Copyright (c) 2017, Annika Wierichs, RWTH Aachen University
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *    * Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *    * Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *    * Neither the name of the University nor the names of its contributors
+ *      may be used to endorse or promote products derived from this
+ *      software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * TODO: Documentation
+ *
+ */
+
+
+#include 
+
+#include 
+
+
+/*
+ * struct ibv_device
+ */
+
+static struct {
+	char * name;
+	char * dev_name;
+	char * dev_path;
+	char * ibdev_path;
+} ibv_device_virt_ptrs;
+
+struct ibv_device * virt_to_phys_ibv_device(struct ibv_device * device) {
+	ibv_device_virt_ptrs.name       = device->name;
+	ibv_device_virt_ptrs.dev_name   = device->dev_name;
+	ibv_device_virt_ptrs.dev_path   = device->dev_path;
+	ibv_device_virt_ptrs.ibdev_path = device->ibdev_path;
+
+	device->name       = (char *) virt_to_phys((size_t) device->name);
+	device->dev_name   = (char *) virt_to_phys((size_t) device->dev_name);
+	device->dev_path   = (char *) virt_to_phys((size_t) device->dev_path);
+	device->ibdev_path = (char *) virt_to_phys((size_t) device->ibdev_path);
+	// _ops obsolete.
+
+	return (struct ibv_device *) virt_to_phys((size_t) device);
+}
+
+void phys_to_virt_ibv_device(struct ibv_device * device) {
+	device->name       = ibv_device_virt_ptrs.name;
+	device->dev_name   = ibv_device_virt_ptrs.dev_name;
+	device->dev_path   = ibv_device_virt_ptrs.dev_path;
+	device->ibdev_path = ibv_device_virt_ptrs.ibdev_path;
+	// _ops obsolete.
+}
+
+
+/*
+ * struct ibv_context
+ */
+
+// struct ibv_context {
+// 	struct ibv_context_ops ops;
+// 	pthread_mutex_t mutex;
+// 	void *abi_compat;
+// };
+
+static struct {
+	struct ibv_device * device;
+	/*void * abi_compat; // TODO*/
+} ibv_context_virt_ptrs;
+
+struct ibv_context * virt_to_phys_ibv_context(struct ibv_context * context) {
+	ibv_context_virt_ptrs.device = context->device;
+
+	context->device = virt_to_phys_ibv_device(context->device);
+	virt_to_phys_ibv_context_ops(&context->ops);
+	/*virt_to_phys_pthread_mutex_t(&context->mutex); // TODO*/
+
+	return (struct ibv_context *) virt_to_phys((size_t) context);
+}
+
+void phys_to_virt_ibv_context(struct ibv_context * context) {
+	context->device = ibv_context_virt_ptrs.device;
+
+	phys_to_virt_ibv_device(context->device);
+	phys_to_virt_ibv_context_ops(&context->ops);
+}
+
+
+/*
+ * struct ibv_context_ops
+ */
+
+static struct {
+	int (*query_device)(struct ibv_context *, struct ibv_device_attr *);
+	int (*query_port)(struct ibv_context *, uint8_t, struct ibv_port_attr *);
+	struct ibv_pd * (*alloc_pd)(struct ibv_context *);
+	int (*dealloc_pd)(struct ibv_pd *);
+	struct ibv_mr * (*reg_mr)(struct ibv_pd *, void *, size_t, int);
+	int (*rereg_mr)(struct ibv_mr *, int, struct ibv_pd *, void *, size_t, int);
+	int (*dereg_mr)(struct ibv_mr *);
+	struct ibv_mw * (*alloc_mw)(struct ibv_pd *, enum ibv_mw_type);
+	int (*bind_mw)(struct ibv_qp *, struct ibv_mw *, struct ibv_mw_bind *);
+	int (*dealloc_mw)(struct ibv_mw *);
+	struct ibv_cq * (*create_cq)(struct ibv_context *, int, struct ibv_comp_channel *, int);
+	int (*poll_cq)(struct ibv_cq *, int, struct ibv_wc *);
+	int (*req_notify_cq)(struct ibv_cq *, int);
+	void (*cq_event)(struct ibv_cq *);
+	int (*resize_cq)(struct ibv_cq *, int);
+	int (*destroy_cq)(struct ibv_cq *);
+	struct ibv_srq * (*create_srq)(struct ibv_pd *, struct ibv_srq_init_attr *);
+	int (*modify_srq)(struct ibv_srq *, struct ibv_srq_attr *, int);
+	int (*query_srq)(struct ibv_srq *, struct ibv_srq_attr *);
+	int (*destroy_srq)(struct ibv_srq *);
+	int (*post_srq_recv)(struct ibv_srq *, struct ibv_recv_wr *, struct ibv_recv_wr **);
+	struct ibv_qp * (*create_qp)(struct ibv_pd *, struct ibv_qp_init_attr *);
+	int (*query_qp)(struct ibv_qp *, struct ibv_qp_attr *, int, struct ibv_qp_init_attr *);
+	int (*modify_qp)(struct ibv_qp *, struct ibv_qp_attr *, int);
+	int (*destroy_qp)(struct ibv_qp *);
+	int (*post_send)(struct ibv_qp *, struct ibv_send_wr *, struct ibv_send_wr **);
+	int (*post_recv)(struct ibv_qp *, struct ibv_recv_wr *, struct ibv_recv_wr **);
+	struct ibv_ah * (*create_ah)(struct ibv_pd *, struct ibv_ah_attr *);
+	int (*destroy_ah)(struct ibv_ah *);
+	int (*attach_mcast)(struct ibv_qp *, const union ibv_gid *, uint16_t);
+	int (*detach_mcast)(struct ibv_qp *, const union ibv_gid *, uint16_t);
+	void (*async_event)(struct ibv_async_event *);
+} ibv_context_ops_virt_ptrs;
+
+struct ibv_context_ops * virt_to_phys_ibv_context_ops(struct ibv_context_ops * context_ops) {
+	ibv_context_ops_virt_ptrs.query_device = context_ops->query_device;
+	ibv_context_ops_virt_ptrs.query_port = context_ops->query_port;
+	ibv_context_ops_virt_ptrs.alloc_pd = context_ops->alloc_pd;
+	ibv_context_ops_virt_ptrs.dealloc_pd = context_ops->dealloc_pd;
+	ibv_context_ops_virt_ptrs.reg_mr = context_ops->reg_mr;
+	ibv_context_ops_virt_ptrs.rereg_mr = context_ops->rereg_mr;
+	ibv_context_ops_virt_ptrs.dereg_mr = context_ops->dereg_mr;
+	ibv_context_ops_virt_ptrs.alloc_mw = context_ops->alloc_mw;
+	ibv_context_ops_virt_ptrs.bind_mw = context_ops->bind_mw;
+	ibv_context_ops_virt_ptrs.dealloc_mw = context_ops->dealloc_mw;
+	ibv_context_ops_virt_ptrs.create_cq = context_ops->create_cq;
+	ibv_context_ops_virt_ptrs.poll_cq = context_ops->poll_cq;
+	ibv_context_ops_virt_ptrs.req_notify_cq = context_ops->req_notify_cq;
+	ibv_context_ops_virt_ptrs.cq_event = context_ops->cq_event;
+	ibv_context_ops_virt_ptrs.resize_cq = context_ops->resize_cq;
+	ibv_context_ops_virt_ptrs.destroy_cq = context_ops->destroy_cq;
+	ibv_context_ops_virt_ptrs.create_srq = context_ops->create_srq;
+	ibv_context_ops_virt_ptrs.modify_srq = context_ops->modify_srq;
+	ibv_context_ops_virt_ptrs.query_srq = context_ops->query_srq;
+	ibv_context_ops_virt_ptrs.destroy_srq = context_ops->destroy_srq;
+	ibv_context_ops_virt_ptrs.post_srq_recv = context_ops->post_srq_recv;
+	ibv_context_ops_virt_ptrs.create_qp = context_ops->create_qp;
+	ibv_context_ops_virt_ptrs.query_qp = context_ops->query_qp;
+	ibv_context_ops_virt_ptrs.modify_qp = context_ops->modify_qp;
+	ibv_context_ops_virt_ptrs.destroy_qp = context_ops->destroy_qp;
+	ibv_context_ops_virt_ptrs.post_send = context_ops->post_send;
+	ibv_context_ops_virt_ptrs.post_recv = context_ops->post_recv;
+	ibv_context_ops_virt_ptrs.create_ah = context_ops->create_ah;
+	ibv_context_ops_virt_ptrs.destroy_ah = context_ops->destroy_ah;
+	ibv_context_ops_virt_ptrs.attach_mcast = context_ops->attach_mcast;
+	ibv_context_ops_virt_ptrs.detach_mcast = context_ops->detach_mcast;
+	ibv_context_ops_virt_ptrs.async_event = context_ops->async_event;
+
+	context_ops->query_device = virt_to_phys((size_t) context_ops->query_device);
+	context_ops->query_port = virt_to_phys((size_t) context_ops->query_port);
+	context_ops->alloc_pd = virt_to_phys((size_t) context_ops->alloc_pd);
+	context_ops->dealloc_pd = virt_to_phys((size_t) context_ops->dealloc_pd);
+	context_ops->reg_mr = virt_to_phys((size_t) context_ops->reg_mr);
+	context_ops->rereg_mr = virt_to_phys((size_t) context_ops->rereg_mr);
+	context_ops->dereg_mr = virt_to_phys((size_t) context_ops->dereg_mr);
+	context_ops->alloc_mw = virt_to_phys((size_t) context_ops->alloc_mw);
+	context_ops->bind_mw = virt_to_phys((size_t) context_ops->bind_mw);
+	context_ops->dealloc_mw = virt_to_phys((size_t) context_ops->dealloc_mw);
+	context_ops->create_cq = virt_to_phys((size_t) context_ops->create_cq);
+	context_ops->poll_cq = virt_to_phys((size_t) context_ops->poll_cq);
+	context_ops->req_notify_cq = virt_to_phys((size_t) context_ops->req_notify_cq);
+	context_ops->cq_event = virt_to_phys((size_t) context_ops->cq_event);
+	context_ops->resize_cq = virt_to_phys((size_t) context_ops->resize_cq);
+	context_ops->destroy_cq = virt_to_phys((size_t) context_ops->destroy_cq);
+	context_ops->create_srq = virt_to_phys((size_t) context_ops->create_srq);
+	context_ops->modify_srq = virt_to_phys((size_t) context_ops->modify_srq);
+	context_ops->query_srq = virt_to_phys((size_t) context_ops->query_srq);
+	context_ops->destroy_srq = virt_to_phys((size_t) context_ops->destroy_srq);
+	context_ops->post_srq_recv = virt_to_phys((size_t) context_ops->post_srq_recv);
+	context_ops->create_qp = virt_to_phys((size_t) context_ops->create_qp);
+	context_ops->query_qp = virt_to_phys((size_t) context_ops->query_qp);
+	context_ops->modify_qp = virt_to_phys((size_t) context_ops->modify_qp);
+	context_ops->destroy_qp = virt_to_phys((size_t) context_ops->destroy_qp);
+	context_ops->post_send = virt_to_phys((size_t) context_ops->post_send);
+	context_ops->post_recv = virt_to_phys((size_t) context_ops->post_recv);
+	context_ops->create_ah = virt_to_phys((size_t) context_ops->create_ah);
+	context_ops->destroy_ah = virt_to_phys((size_t) context_ops->destroy_ah);
+	context_ops->attach_mcast = virt_to_phys((size_t) context_ops->attach_mcast);
+	context_ops->detach_mcast = virt_to_phys((size_t) context_ops->detach_mcast);
+	context_ops->async_event = virt_to_phys((size_t) context_ops->async_event);
+
+	return (struct ibv_context_ops *) virt_to_phys((size_t) context_ops);
+}
+
+void phys_to_virt_ibv_context_ops(struct ibv_context_ops * context_ops) {
+	context_ops->query_device = ibv_context_ops_virt_ptrs.query_device;
+	context_ops->query_port = ibv_context_ops_virt_ptrs.query_port;
+	context_ops->alloc_pd = ibv_context_ops_virt_ptrs.alloc_pd;
+	context_ops->dealloc_pd = ibv_context_ops_virt_ptrs.dealloc_pd;
+	context_ops->reg_mr = ibv_context_ops_virt_ptrs.reg_mr;
+	context_ops->rereg_mr = ibv_context_ops_virt_ptrs.rereg_mr;
+	context_ops->dereg_mr = ibv_context_ops_virt_ptrs.dereg_mr;
+	context_ops->alloc_mw = ibv_context_ops_virt_ptrs.alloc_mw;
+	context_ops->bind_mw = ibv_context_ops_virt_ptrs.bind_mw;
+	context_ops->dealloc_mw = ibv_context_ops_virt_ptrs.dealloc_mw;
+	context_ops->create_cq = ibv_context_ops_virt_ptrs.create_cq;
+	context_ops->poll_cq = ibv_context_ops_virt_ptrs.poll_cq;
+	context_ops->req_notify_cq = ibv_context_ops_virt_ptrs.req_notify_cq;
+	context_ops->cq_event = ibv_context_ops_virt_ptrs.cq_event;
+	context_ops->resize_cq = ibv_context_ops_virt_ptrs.resize_cq;
+	context_ops->destroy_cq = ibv_context_ops_virt_ptrs.destroy_cq;
+	context_ops->create_srq = ibv_context_ops_virt_ptrs.create_srq;
+	context_ops->modify_srq = ibv_context_ops_virt_ptrs.modify_srq;
+	context_ops->query_srq = ibv_context_ops_virt_ptrs.query_srq;
+	context_ops->destroy_srq = ibv_context_ops_virt_ptrs.destroy_srq;
+	context_ops->post_srq_recv = ibv_context_ops_virt_ptrs.post_srq_recv;
+	context_ops->create_qp = ibv_context_ops_virt_ptrs.create_qp;
+	context_ops->query_qp = ibv_context_ops_virt_ptrs.query_qp;
+	context_ops->modify_qp = ibv_context_ops_virt_ptrs.modify_qp;
+	context_ops->destroy_qp = ibv_context_ops_virt_ptrs.destroy_qp;
+	context_ops->post_send = ibv_context_ops_virt_ptrs.post_send;
+	context_ops->post_recv = ibv_context_ops_virt_ptrs.post_recv;
+	context_ops->create_ah = ibv_context_ops_virt_ptrs.create_ah;
+	context_ops->destroy_ah = ibv_context_ops_virt_ptrs.destroy_ah;
+	context_ops->attach_mcast = ibv_context_ops_virt_ptrs.attach_mcast;
+	context_ops->detach_mcast = ibv_context_ops_virt_ptrs.detach_mcast;
+	context_ops->async_event = ibv_context_ops_virt_ptrs.async_event;
+}
+
+
+/*
+ * struct ibv_port_attr
+ */
+
+struct ibv_port_attr * virt_to_phys_ibv_port_attr(struct ibv_port_attr * port_attr) {
+	return (struct ibv_port_attr *) virt_to_phys((size_t) port_attr);
+}
+
+void phys_to_virt_ibv_port_attr(struct ibv_port_attr * port_attr) {}
diff --git a/tools/uhyve-ibv.c b/tools/uhyve-ibv.c
index a6e4f266f..384039111 100644
--- a/tools/uhyve-ibv.c
+++ b/tools/uhyve-ibv.c
@@ -51,6 +51,13 @@
 /*}*/
 /*}*/
 
+
+struct ibv_context * ibv_open_device(struct ibv_device * device);
+const char* ibv_get_device_name(struct ibv_device *device);
+int ibv_query_port(struct ibv_context * context, uint8_t port_num, struct ibv_port_attr * port_attr);
+struct ibv_comp_channel * ibv_create_comp_channel(struct ibv_context * context);
+
+
 void call_ibv_open_device(struct kvm_run * run, uint8_t * guest_mem) {
 	unsigned data = *((unsigned*)((size_t)run+run->io.data_offset));
 	uhyve_ibv_open_device_t * args = (uhyve_ibv_open_device_t *) (guest_mem + data);
@@ -58,19 +65,23 @@ void call_ibv_open_device(struct kvm_run * run, uint8_t * guest_mem) {
 	struct ibv_context * host_ret = ibv_open_device(guest_mem+(size_t)args->device);
 	memcpy(guest_mem+(size_t)args->ret, host_ret, sizeof(host_ret));
 	// TODO: Convert ptrs contained in return value.
-	// TODO: Delete host_ret data structure.
+	free(host_ret);
 }
 
+
 void call_ibv_get_device_name(struct kvm_run * run, uint8_t * guest_mem) {
 	unsigned data = *((unsigned*)((size_t)run+run->io.data_offset));
 	uhyve_ibv_get_device_name_t * args = (uhyve_ibv_get_device_name_t *) (guest_mem + data);
 
+	// TODO: Tricky because char ptr isn't allocated in called function.
 	const char * host_ret = ibv_get_device_name(guest_mem+(size_t)args->device);
 	memcpy(guest_mem+(size_t)args->ret, host_ret, sizeof(host_ret));
 	// TODO: Convert ptrs contained in return value.
-	// TODO: Delete host_ret data structure.
+	// TODO: How to tell if ret needs to be deleted?
 }
 
+
+// Return done
 void call_ibv_query_port(struct kvm_run * run, uint8_t * guest_mem) {
 	unsigned data = *((unsigned*)((size_t)run+run->io.data_offset));
 	uhyve_ibv_query_port_t * args = (uhyve_ibv_query_port_t *) (guest_mem + data);
@@ -79,12 +90,16 @@ void call_ibv_query_port(struct kvm_run * run, uint8_t * guest_mem) {
 	args->ret = host_ret;
 }
 
+
+// Return done
 void call_ibv_create_comp_channel(struct kvm_run * run, uint8_t * guest_mem) {
 	unsigned data = *((unsigned*)((size_t)run+run->io.data_offset));
 	uhyve_ibv_create_comp_channel_t * args = (uhyve_ibv_create_comp_channel_t *) (guest_mem + data);
 
 	struct ibv_comp_channel * host_ret = ibv_create_comp_channel(guest_mem+(size_t)args->context);
-	memcpy(guest_mem+(size_t)args->ret, host_ret, sizeof(host_ret));
+	host_ret->context = (struct ibv_context *) ((size_t) host_ret->context - (size_t) guest_mem); // Should be in separate function.
 	// TODO: Convert ptrs contained in return value.
-	// TODO: Delete host_ret data structure.
+
+	memcpy(guest_mem+(size_t)args->ret, host_ret, sizeof(*host_ret));
+	free(host_ret); // TODO add to code gen
 }
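
Note on the pattern used in kernel/ibv_struct_address_conversion.c (illustration only, not part of the patch): the virt_to_phys_*/phys_to_virt_* helpers for structs with embedded pointers all follow the same save/translate/restore scheme. The guest-virtual member pointers are stashed in a static shadow struct, overwritten with guest-physical addresses before the hypercall, and restored from the shadow copy afterwards. The standalone sketch below shows that scheme with a toy address translation; demo_virt_to_phys, struct demo_device and demo_device_virt_ptrs are invented names that stand in for HermitCore's virt_to_phys() and the real ibv structs.

#include <stdint.h>
#include <stdio.h>

/* Toy stand-in for the kernel's virt_to_phys(): pretend guest-physical
 * addresses are simply the virtual address plus a fixed offset. */
static size_t demo_virt_to_phys(size_t virt) { return virt + 0x100000; }

/* Hypothetical struct with an embedded pointer, like ibv_device's name fields. */
struct demo_device {
	char * name;
};

/* Shadow copy of the guest-virtual pointer, mirroring ibv_device_virt_ptrs. */
static struct {
	char * name;
} demo_device_virt_ptrs;

static struct demo_device * demo_virt_to_phys_device(struct demo_device * dev) {
	demo_device_virt_ptrs.name = dev->name;                        // save virtual pointer
	dev->name = (char *) demo_virt_to_phys((size_t) dev->name);    // patch in physical address
	return (struct demo_device *) demo_virt_to_phys((size_t) dev); // hand this to the hypervisor
}

static void demo_phys_to_virt_device(struct demo_device * dev) {
	dev->name = demo_device_virt_ptrs.name;                        // restore after the hypercall
}

int main(void) {
	char name[] = "mlx4_0";
	struct demo_device dev = { name };

	struct demo_device * phys = demo_virt_to_phys_device(&dev);
	printf("would pass %p to uhyve\n", (void *) phys);             // do not dereference: not a valid host address

	demo_phys_to_virt_device(&dev);
	printf("restored name: %s\n", dev.name);
	return 0;
}

One consequence of keeping a single static shadow struct per type is that the conversion is not re-entrant; that is fine for the one-hypercall-at-a-time path the patch targets, but it rules out converting two contexts concurrently.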