Mirror of https://github.com/hermitcore/libhermit.git
Finished custom snippets in code generator. ibv_post functions now automatically generated.
Commit da0809b4c0 (parent a1980b40bf)
6 changed files with 468 additions and 179 deletions
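To make the commit summary concrete: the generator now splices per-function pointer-conversion code around each hypercall instead of leaving TODO placeholders. The short Python sketch below models that flow. The names supported_functions, CONVERT, REVERT and generate() mirror the custom_snippets module changed in this commit; the placeholder snippet text and the emit_wrapper() helper are simplified stand-ins for illustration, not the actual tool code.

# Simplified model of the generator flow this commit completes (illustrative only).
CONVERT = 2
REVERT = 1

supported_functions = ["ibv_post_send", "ibv_post_recv", "ibv_post_srq_recv",
                       "ibv_post_wq_recv", "ibv_create_rwq_ind_table"]

def generate(name, part):
    # Stand-in for the real C snippets defined in custom_snippets.py.
    kind = "convert" if part == CONVERT else "revert"
    return "\t// custom {0} code for {1}\n".format(kind, name)

def emit_wrapper(name, port_name):
    # Hypothetical helper: builds the body of one generated hermit wrapper.
    code = ""
    if name in supported_functions:
        code += generate(name, CONVERT)
    code += "\tuhyve_send({0}, (unsigned) virt_to_phys((size_t) &uhyve_args));\n".format(port_name)
    if name in supported_functions:
        code += generate(name, REVERT)
    return code

print(emit_wrapper("ibv_post_recv", "UHYVE_PORT_IBV_POST_RECV"))

For functions outside the supported list the wrapper degenerates to the bare uhyve_send call, which matches the generator change in the last hunk of this diff.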

@@ -78,7 +78,7 @@ int ibv_destroy_wq(struct ibv_wq * wq);
struct ibv_rwq_ind_table * ibv_create_rwq_ind_table(struct ibv_context * context, struct ibv_rwq_ind_table_init_attr * init_attr);
int ibv_destroy_rwq_ind_table(struct ibv_rwq_ind_table * rwq_ind_table);
int ibv_post_send(struct ibv_qp * qp, struct ibv_send_wr * wr, struct ibv_send_wr ** bad_wr);
int ibv_post_recv(struct ibv_qp * qp, struct ibv_recv_wr * wr, struct ibv_recv_wr ** bad_wr);
int ibv_post_recv(struct ibv_qp * qp, struct ibv_recv_wr * recv_wr, struct ibv_recv_wr ** bad_recv_wr);
struct ibv_ah * ibv_create_ah(struct ibv_pd * pd, struct ibv_ah_attr * attr);
int ibv_init_ah_from_wc(struct ibv_context * context, uint8_t port_num, struct ibv_wc * wc, struct ibv_grh * grh, struct ibv_ah_attr * ah_attr);
struct ibv_ah * ibv_create_ah_from_wc(struct ibv_pd * pd, struct ibv_wc * wc, struct ibv_grh * grh, uint8_t port_num);

@@ -499,13 +499,62 @@ int ibv_post_wq_recv(struct ibv_wq * wq, struct ibv_recv_wr * recv_wr, struct ib
    uhyve_args.recv_wr = (struct ibv_recv_wr *) guest_to_host((size_t) recv_wr);
    uhyve_args.bad_recv_wr = (struct ibv_recv_wr **) guest_to_host((size_t) bad_recv_wr); // TODO: Check ** param here.

    // TODO: Take care of pointer conversions in recv_wr.
    // TODO: Take care of pointer conversions in bad_recv_wr.
    struct ibv_recv_wr * curr_wr;
    int num_sges_max;
    int num_wrs;

    // Number of work requests in linked list and SGEs in each WR
    num_wrs = 0;
    num_sges_max = 0;
    for (curr_wr = recv_wr; curr_wr; curr_wr = curr_wr->next) {
        num_wrs++;
        if (num_sges_max < curr_wr->num_sge) {
            num_sges_max = curr_wr->num_sge;
        }
    }

    // Backup arrays for original guest memory pointers
    struct ibv_recv_wr * wr__next [num_wrs];
    struct ibv_sge * wr__sg_list [num_wrs];
    uint64_t wr__sg_list__addr[num_wrs][num_sges_max];

    curr_wr = recv_wr;
    for (int w = 0; w < num_wrs; w++) {
        // Next pointer
        wr__next[w] = curr_wr->next;
        curr_wr->next = (struct ibv_recv_wr *) guest_to_host((size_t) curr_wr->next);

        // SGE array
        for (int s = 0; s < curr_wr->num_sge; s++) {
            wr__sg_list__addr[w][s] = curr_wr->sg_list[s].addr;
            curr_wr->sg_list[s].addr = (uint64_t) guest_to_host((size_t) curr_wr->sg_list[s].addr);
        }

        wr__sg_list[w] = curr_wr->sg_list;
        curr_wr->sg_list = (struct ibv_sge *) guest_to_host((size_t) curr_wr->sg_list);

        curr_wr = wr__next[w];
    }
    uhyve_send(UHYVE_PORT_IBV_POST_WQ_RECV, (unsigned) virt_to_phys((size_t) &uhyve_args));

    // TODO: Take care of reverse pointer conversions in recv_wr.
    // TODO: Take care of reverse pointer conversions in bad_recv_wr.
    if (*bad_recv_wr && *bad_recv_wr == uhyve_args.recv_wr) {
        *bad_recv_wr = recv_wr;
    }

    curr_wr = recv_wr;
    for (int w = 0; w < num_wrs; w++) {
        if (*bad_recv_wr && *bad_recv_wr == curr_wr->next) {
            *bad_recv_wr = wr__next[w];
        }

        curr_wr->next = wr__next[w];
        curr_wr->sg_list = wr__sg_list[w];
        for (int s = 0; s < curr_wr->num_sge; s++) {
            curr_wr->sg_list[s].addr = wr__sg_list__addr[w][s];
        }

        curr_wr = curr_wr->next;
    }

    return uhyve_args.ret;
}

@@ -880,12 +929,8 @@ struct ibv_xrcd * ibv_open_xrcd(struct ibv_context * context, struct ibv_xrcd_in
    uhyve_args.context = context;
    uhyve_args.xrcd_init_attr = (struct ibv_xrcd_init_attr *) guest_to_host((size_t) xrcd_init_attr);

    // TODO: Take care of pointer conversions in xrcd_init_attr.

    uhyve_send(UHYVE_PORT_IBV_OPEN_XRCD, (unsigned) virt_to_phys((size_t) &uhyve_args));

    // TODO: Take care of reverse pointer conversions in xrcd_init_attr.

    return uhyve_args.ret;
}

@@ -1468,13 +1513,62 @@ int ibv_post_srq_recv(struct ibv_srq * srq, struct ibv_recv_wr * recv_wr, struct
    uhyve_args.recv_wr = (struct ibv_recv_wr *) guest_to_host((size_t) recv_wr);
    uhyve_args.bad_recv_wr = (struct ibv_recv_wr **) guest_to_host((size_t) bad_recv_wr); // TODO: Check ** param here.

    // TODO: Take care of pointer conversions in recv_wr.
    // TODO: Take care of pointer conversions in bad_recv_wr.
    struct ibv_recv_wr * curr_wr;
    int num_sges_max;
    int num_wrs;

    // Number of work requests in linked list and SGEs in each WR
    num_wrs = 0;
    num_sges_max = 0;
    for (curr_wr = recv_wr; curr_wr; curr_wr = curr_wr->next) {
        num_wrs++;
        if (num_sges_max < curr_wr->num_sge) {
            num_sges_max = curr_wr->num_sge;
        }
    }

    // Backup arrays for original guest memory pointers
    struct ibv_recv_wr * wr__next [num_wrs];
    struct ibv_sge * wr__sg_list [num_wrs];
    uint64_t wr__sg_list__addr[num_wrs][num_sges_max];

    curr_wr = recv_wr;
    for (int w = 0; w < num_wrs; w++) {
        // Next pointer
        wr__next[w] = curr_wr->next;
        curr_wr->next = (struct ibv_recv_wr *) guest_to_host((size_t) curr_wr->next);

        // SGE array
        for (int s = 0; s < curr_wr->num_sge; s++) {
            wr__sg_list__addr[w][s] = curr_wr->sg_list[s].addr;
            curr_wr->sg_list[s].addr = (uint64_t) guest_to_host((size_t) curr_wr->sg_list[s].addr);
        }

        wr__sg_list[w] = curr_wr->sg_list;
        curr_wr->sg_list = (struct ibv_sge *) guest_to_host((size_t) curr_wr->sg_list);

        curr_wr = wr__next[w];
    }
    uhyve_send(UHYVE_PORT_IBV_POST_SRQ_RECV, (unsigned) virt_to_phys((size_t) &uhyve_args));

    // TODO: Take care of reverse pointer conversions in recv_wr.
    // TODO: Take care of reverse pointer conversions in bad_recv_wr.
    if (*bad_recv_wr && *bad_recv_wr == uhyve_args.recv_wr) {
        *bad_recv_wr = recv_wr;
    }

    curr_wr = recv_wr;
    for (int w = 0; w < num_wrs; w++) {
        if (*bad_recv_wr && *bad_recv_wr == curr_wr->next) {
            *bad_recv_wr = wr__next[w];
        }

        curr_wr->next = wr__next[w];
        curr_wr->sg_list = wr__sg_list[w];
        for (int s = 0; s < curr_wr->num_sge; s++) {
            curr_wr->sg_list[s].addr = wr__sg_list__addr[w][s];
        }

        curr_wr = curr_wr->next;
    }

    return uhyve_args.ret;
}

@@ -1754,11 +1848,14 @@ struct ibv_rwq_ind_table * ibv_create_rwq_ind_table(struct ibv_context * context
    uhyve_args.context = context;
    uhyve_args.init_attr = (struct ibv_rwq_ind_table_init_attr *) guest_to_host((size_t) init_attr);

    // TODO: Take care of pointer conversions in init_attr.
    // TODO: Entries of the list should be universal references residing in the IB memory pool.
    // Confirm this.
    struct ibv_wq ** init_attr__ind_tbl = init_attr->ind_tbl;
    init_attr->ind_tbl = (struct ibv_wq **) guest_to_host((size_t) init_attr->ind_tbl);

    uhyve_send(UHYVE_PORT_IBV_CREATE_RWQ_IND_TABLE, (unsigned) virt_to_phys((size_t) &uhyve_args));

    // TODO: Take care of reverse pointer conversions in init_attr.
    init_attr->ind_tbl = init_attr__ind_tbl;

    return uhyve_args.ret;
}

@@ -1804,13 +1901,89 @@ int ibv_post_send(struct ibv_qp * qp, struct ibv_send_wr * wr, struct ibv_send_w
    uhyve_args.wr = (struct ibv_send_wr *) guest_to_host((size_t) wr);
    uhyve_args.bad_wr = (struct ibv_send_wr **) guest_to_host((size_t) bad_wr); // TODO: Check ** param here.

    // TODO: Take care of pointer conversions in wr.
    // TODO: Take care of pointer conversions in bad_wr.
    struct ibv_send_wr * curr_wr;
    int num_wrs;
    int num_sges_max;
    int is_bind_mw, is_tso;

    // Number of work requests in linked list and SGEs in each WR
    num_wrs = 0;
    num_sges_max = 0;
    for (curr_wr = wr; curr_wr; curr_wr = curr_wr->next) {
        num_wrs++;
        if (num_sges_max < curr_wr->num_sge) {
            num_sges_max = curr_wr->num_sge;
        }
    }

    // Backup arrays for original guest memory pointers
    struct ibv_send_wr * wr__next [num_wrs];
    struct ibv_sge * wr__sg_list [num_wrs];
    uint64_t wr__sg_list__addr [num_wrs][num_sges_max];
    uint64_t wr__bind_mw__bind_info__addr[num_wrs];
    void * wr__tso__hdr [num_wrs];

    curr_wr = wr;
    for (int w = 0; w < num_wrs; w++) {
        is_bind_mw = curr_wr->opcode == IBV_WR_BIND_MW;
        is_tso = curr_wr->opcode == IBV_WR_TSO;

        // union: bind_mw and tso
        if (is_bind_mw) {
            wr__bind_mw__bind_info__addr[w] = curr_wr->bind_mw.bind_info.addr;
            curr_wr->bind_mw.bind_info.addr = (uint64_t) guest_to_host((size_t) curr_wr->bind_mw.bind_info.addr);
        } else if (is_tso) {
            wr__tso__hdr[w] = curr_wr->tso.hdr;
            curr_wr->tso.hdr = (void *) guest_to_host((size_t) curr_wr->tso.hdr);
        }

        // Next pointer and SGE array
        wr__next[w] = curr_wr->next;
        curr_wr->next = (struct ibv_send_wr *) guest_to_host((size_t) curr_wr->next);

        for (int s = 0; s < curr_wr->num_sge; s++) {
            wr__sg_list__addr[w][s] = curr_wr->sg_list[s].addr;
            curr_wr->sg_list[s].addr = (uint64_t) guest_to_host((size_t) curr_wr->sg_list[s].addr);
        }

        wr__sg_list[w] = curr_wr->sg_list;
        curr_wr->sg_list = (struct ibv_sge *) guest_to_host((size_t) curr_wr->sg_list);

        curr_wr = wr__next[w];
    }

    uhyve_send(UHYVE_PORT_IBV_POST_SEND, (unsigned) virt_to_phys((size_t) &uhyve_args));

    // TODO: Take care of reverse pointer conversions in wr.
    // TODO: Take care of reverse pointer conversions in bad_wr.
    if (*bad_wr && *bad_wr == uhyve_args.wr) {
        *bad_wr = wr;
    }

    curr_wr = wr;
    for (int w = 0; w < num_wrs; w++) {
        is_bind_mw = curr_wr->opcode == IBV_WR_BIND_MW;
        is_tso = curr_wr->opcode == IBV_WR_TSO;

        // union: bind_mw and tso
        if (is_bind_mw) {
            curr_wr->bind_mw.bind_info.addr = wr__bind_mw__bind_info__addr[w];
        } else if (is_tso) {
            curr_wr->tso.hdr = wr__tso__hdr[w];
        }

        // Bad request
        if (*bad_wr && *bad_wr == curr_wr->next) {
            *bad_wr = wr__next[w];
        }

        // Next pointer and SGE array
        curr_wr->next = wr__next[w];
        curr_wr->sg_list = wr__sg_list[w];
        for (int s = 0; s < curr_wr->num_sge; s++) {
            curr_wr->sg_list[s].addr = wr__sg_list__addr[w][s];
        }

        curr_wr = curr_wr->next;
    }

    return uhyve_args.ret;
}

@@ -1823,25 +1996,74 @@ int ibv_post_send(struct ibv_qp * qp, struct ibv_send_wr * wr, struct ibv_send_w
typedef struct {
    // Parameters:
    struct ibv_qp * qp;
    struct ibv_recv_wr * wr;
    struct ibv_recv_wr ** bad_wr;
    struct ibv_recv_wr * recv_wr;
    struct ibv_recv_wr ** bad_recv_wr;
    // Return value:
    int ret;
} __attribute__((packed)) uhyve_ibv_post_recv_t;

int ibv_post_recv(struct ibv_qp * qp, struct ibv_recv_wr * wr, struct ibv_recv_wr ** bad_wr) {
int ibv_post_recv(struct ibv_qp * qp, struct ibv_recv_wr * recv_wr, struct ibv_recv_wr ** bad_recv_wr) {
    uhyve_ibv_post_recv_t uhyve_args;
    uhyve_args.qp = qp;
    uhyve_args.wr = (struct ibv_recv_wr *) guest_to_host((size_t) wr);
    uhyve_args.bad_wr = (struct ibv_recv_wr **) guest_to_host((size_t) bad_wr); // TODO: Check ** param here.
    uhyve_args.recv_wr = (struct ibv_recv_wr *) guest_to_host((size_t) recv_wr);
    uhyve_args.bad_recv_wr = (struct ibv_recv_wr **) guest_to_host((size_t) bad_recv_wr); // TODO: Check ** param here.

    // TODO: Take care of pointer conversions in wr.
    // TODO: Take care of pointer conversions in bad_wr.
    struct ibv_recv_wr * curr_wr;
    int num_sges_max;
    int num_wrs;

    // Number of work requests in linked list and SGEs in each WR
    num_wrs = 0;
    num_sges_max = 0;
    for (curr_wr = recv_wr; curr_wr; curr_wr = curr_wr->next) {
        num_wrs++;
        if (num_sges_max < curr_wr->num_sge) {
            num_sges_max = curr_wr->num_sge;
        }
    }

    // Backup arrays for original guest memory pointers
    struct ibv_recv_wr * wr__next [num_wrs];
    struct ibv_sge * wr__sg_list [num_wrs];
    uint64_t wr__sg_list__addr[num_wrs][num_sges_max];

    curr_wr = recv_wr;
    for (int w = 0; w < num_wrs; w++) {
        // Next pointer
        wr__next[w] = curr_wr->next;
        curr_wr->next = (struct ibv_recv_wr *) guest_to_host((size_t) curr_wr->next);

        // SGE array
        for (int s = 0; s < curr_wr->num_sge; s++) {
            wr__sg_list__addr[w][s] = curr_wr->sg_list[s].addr;
            curr_wr->sg_list[s].addr = (uint64_t) guest_to_host((size_t) curr_wr->sg_list[s].addr);
        }

        wr__sg_list[w] = curr_wr->sg_list;
        curr_wr->sg_list = (struct ibv_sge *) guest_to_host((size_t) curr_wr->sg_list);

        curr_wr = wr__next[w];
    }
    uhyve_send(UHYVE_PORT_IBV_POST_RECV, (unsigned) virt_to_phys((size_t) &uhyve_args));

    // TODO: Take care of reverse pointer conversions in wr.
    // TODO: Take care of reverse pointer conversions in bad_wr.
    if (*bad_recv_wr && *bad_recv_wr == uhyve_args.recv_wr) {
        *bad_recv_wr = recv_wr;
    }

    curr_wr = recv_wr;
    for (int w = 0; w < num_wrs; w++) {
        if (*bad_recv_wr && *bad_recv_wr == curr_wr->next) {
            *bad_recv_wr = wr__next[w];
        }

        curr_wr->next = wr__next[w];
        curr_wr->sg_list = wr__sg_list[w];
        for (int s = 0; s < curr_wr->num_sge; s++) {
            curr_wr->sg_list[s].addr = wr__sg_list__addr[w][s];
        }

        curr_wr = curr_wr->next;
    }

    return uhyve_args.ret;
}

@@ -712,8 +712,8 @@ typedef struct {
typedef struct {
    // Parameters:
    struct ibv_qp * qp;
    struct ibv_recv_wr * wr;
    struct ibv_recv_wr ** bad_wr;
    struct ibv_recv_wr * recv_wr;
    struct ibv_recv_wr ** bad_recv_wr;
    // Return value:
    int ret;
} __attribute__((packed)) uhyve_ibv_post_recv_t;

@@ -1208,7 +1208,7 @@ void call_ibv_post_recv(struct kvm_run * run, uint8_t * guest_mem) {
    uhyve_ibv_post_recv_t * args = (uhyve_ibv_post_recv_t *) (guest_mem + data);

    use_ib_mem_pool = true;
    args->ret = ibv_post_recv(args->qp, args->wr, args->bad_wr);
    args->ret = ibv_post_recv(args->qp, args->recv_wr, args->bad_recv_wr);
    use_ib_mem_pool = false;
}

@@ -1,170 +1,244 @@
BACKUP_AND_CONVERT = 2
"""Copyright (c) 2018, Annika Wierichs, RWTH Aachen University

All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.
    * Neither the name of the University nor the names of its contributors
      may be used to endorse or promote products derived from this
      software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""

"""
This file contains custom address conversion routines for specific functions
that cannot be generated automatically. If additional functions requiring custom
code are added to the verbs API in future releases, these snippets may be added
here (also add the function name to the list of supported functions).

The code for 3 different post_recv functions can be reused as their parameters
are the same and therefore require the same pointer conversions:
    ibv_post_recv    (..., struct ibv_recv_wr* recv_wr, struct ibv_recv_wr** bad_recv_wr)
    ibv_post_wq_recv (..., struct ibv_recv_wr* recv_wr, struct ibv_recv_wr** bad_recv_wr)
    ibv_post_srq_recv(..., struct ibv_recv_wr* recv_wr, struct ibv_recv_wr** bad_recv_wr)

TODO: These snippets currently use spaces (not tabs) at the start of lines, fix.
"""

supported_functions = ["ibv_post_send",
                       "ibv_post_recv",
                       "ibv_post_srq_recv",
                       "ibv_post_wq_recv",
                       "ibv_create_rwq_ind_table"]

CONVERT = 2
REVERT = 1

# TODO (anni): add the other functions here as well
#
# ibv_post_recv, ibv_post_srq_recv, ibv_post_wq_recv
#

recv_wrs_backup_and_convert = \
"""
    struct ibv_recv_wr * curr_wr;
    int num_sges_max;
    int num_wrs;
post_recv_convert = \
"""    struct ibv_recv_wr * curr_wr;
    int num_sges_max;
    int num_wrs;

    // Number of work requests in linked list and SGEs in each WR
    num_wrs = 0;
    num_sges_max = 0;
    for (curr_wr = recv_wr; curr_wr; curr_wr = curr_wr->next) {
        num_wrs++;
        if (num_sges_max < curr_wr->num_sge) {
            num_sges_max = curr_wr->num_sge;
        }
    }
    // Number of work requests in linked list and SGEs in each WR
    num_wrs = 0;
    num_sges_max = 0;
    for (curr_wr = recv_wr; curr_wr; curr_wr = curr_wr->next) {
        num_wrs++;
        if (num_sges_max < curr_wr->num_sge) {
            num_sges_max = curr_wr->num_sge;
        }
    }

    // Backup arrays for original guest memory pointers
    struct ibv_recv_wr * wr__next [num_wrs];
    struct ibv_sge * wr__sg_list [num_wrs];
    uint64_t wr__sg_list__addr[num_wrs][num_sges_max];
    // Backup arrays for original guest memory pointers
    struct ibv_recv_wr * wr__next [num_wrs];
    struct ibv_sge * wr__sg_list [num_wrs];
    uint64_t wr__sg_list__addr[num_wrs][num_sges_max];

    curr_wr = recv_wr;
    for (int w = 0; w < num_wrs; w++) {
        // Next pointer
        wr__next[w] = curr_wr->next;
        curr_wr->next = (struct ibv_recv_wr *) guest_to_host((size_t) curr_wr->next);
    curr_wr = recv_wr;
    for (int w = 0; w < num_wrs; w++) {
        // Next pointer
        wr__next[w] = curr_wr->next;
        curr_wr->next = (struct ibv_recv_wr *) guest_to_host((size_t) curr_wr->next);

        // SGE array
        for (int s = 0; s < curr_wr->num_sge; s++) {
            wr__sg_list__addr[w][s] = curr_wr->sg_list[s].addr;
            curr_wr->sg_list[s].addr = (uint64_t) guest_to_host((size_t) curr_wr->sg_list[s].addr);
        }
        // SGE array
        for (int s = 0; s < curr_wr->num_sge; s++) {
            wr__sg_list__addr[w][s] = curr_wr->sg_list[s].addr;
            curr_wr->sg_list[s].addr = (uint64_t) guest_to_host((size_t) curr_wr->sg_list[s].addr);
        }

        wr__sg_list[w] = curr_wr->sg_list;
        curr_wr->sg_list = (struct ibv_sge *) guest_to_host((size_t) curr_wr->sg_list);
        wr__sg_list[w] = curr_wr->sg_list;
        curr_wr->sg_list = (struct ibv_sge *) guest_to_host((size_t) curr_wr->sg_list);

        curr_wr = wr__next[w];
    }
        curr_wr = wr__next[w];
    }
"""

recv_wrs_revert = \
"""
    if (*bad_recv_wr && *bad_recv_wr == uhyve_args.recv_wr) {
        *bad_recv_wr = recv_wr;
    }
post_recv_revert = \
"""    if (*bad_recv_wr && *bad_recv_wr == uhyve_args.recv_wr) {
        *bad_recv_wr = recv_wr;
    }

    curr_wr = recv_wr;
    for (int w = 0; w < num_wrs; w++) {
        if (*bad_recv_wr && *bad_recv_wr == curr_wr->next) {
            *bad_recv_wr = wr__next[w];
        }
    curr_wr = recv_wr;
    for (int w = 0; w < num_wrs; w++) {
        if (*bad_recv_wr && *bad_recv_wr == curr_wr->next) {
            *bad_recv_wr = wr__next[w];
        }

        curr_wr->next = wr__next[w];
        curr_wr->sg_list = wr__sg_list[w];
        for (int s = 0; s < curr_wr->num_sge; s++) {
            curr_wr->sg_list[s].addr = wr__sg_list__addr[w][s];
        }
        curr_wr->next = wr__next[w];
        curr_wr->sg_list = wr__sg_list[w];
        for (int s = 0; s < curr_wr->num_sge; s++) {
            curr_wr->sg_list[s].addr = wr__sg_list__addr[w][s];
        }

        curr_wr = curr_wr->next;
    }

        curr_wr = curr_wr->next;
    }
"""

#
# ibv_post_send
#

backup_and_convert_send_wrs = \
"""
    struct ibv_send_wr * curr_wr;
    int num_wrs;
    int num_sges_max;
    int is_bind_mw, is_tso;
post_send_convert = \
"""    struct ibv_send_wr * curr_wr;
    int num_wrs;
    int num_sges_max;
    int is_bind_mw, is_tso;

    // Number of work requests in linked list and SGEs in each WR
    num_wrs = 0;
    num_sges_max = 0;
    for (curr_wr = wr; curr_wr; curr_wr = curr_wr->next) {
        num_wrs++;
        if (num_sges_max < curr_wr->num_sge) {
            num_sges_max = curr_wr->num_sge;
        }
    }
    // Number of work requests in linked list and SGEs in each WR
    num_wrs = 0;
    num_sges_max = 0;
    for (curr_wr = wr; curr_wr; curr_wr = curr_wr->next) {
        num_wrs++;
        if (num_sges_max < curr_wr->num_sge) {
            num_sges_max = curr_wr->num_sge;
        }
    }

    // Backup arrays for original guest memory pointers
    struct ibv_send_wr * wr__next [num_wrs];
    struct ibv_sge * wr__sg_list [num_wrs];
    uint64_t wr__sg_list__addr [num_wrs][num_sges_max];
    uint64_t wr__bind_mw__bind_info__addr[num_wrs];
    void * wr__tso__hdr [num_wrs];
    // Backup arrays for original guest memory pointers
    struct ibv_send_wr * wr__next [num_wrs];
    struct ibv_sge * wr__sg_list [num_wrs];
    uint64_t wr__sg_list__addr [num_wrs][num_sges_max];
    uint64_t wr__bind_mw__bind_info__addr[num_wrs];
    void * wr__tso__hdr [num_wrs];

    curr_wr = wr;
    for (int w = 0; w < num_wrs; w++) {
        is_bind_mw = curr_wr->opcode == IBV_WR_BIND_MW;
        is_tso = curr_wr->opcode == IBV_WR_TSO;
    curr_wr = wr;
    for (int w = 0; w < num_wrs; w++) {
        is_bind_mw = curr_wr->opcode == IBV_WR_BIND_MW;
        is_tso = curr_wr->opcode == IBV_WR_TSO;

        // union: bind_mw and tso
        if (is_bind_mw) {
            wr__bind_mw__bind_info__addr[w] = curr_wr->bind_mw.bind_info.addr;
            curr_wr->bind_mw.bind_info.addr = (uint64_t) guest_to_host((size_t) curr_wr->bind_mw.bind_info.addr);
        } else if (is_tso) {
            wr__tso__hdr[w] = curr_wr->tso.hdr;
            curr_wr->tso.hdr = (void *) guest_to_host((size_t) curr_wr->tso.hdr);
        }
        // union: bind_mw and tso
        if (is_bind_mw) {
            wr__bind_mw__bind_info__addr[w] = curr_wr->bind_mw.bind_info.addr;
            curr_wr->bind_mw.bind_info.addr = (uint64_t) guest_to_host((size_t) curr_wr->bind_mw.bind_info.addr);
        } else if (is_tso) {
            wr__tso__hdr[w] = curr_wr->tso.hdr;
            curr_wr->tso.hdr = (void *) guest_to_host((size_t) curr_wr->tso.hdr);
        }

        // Next pointer and SGE array
        wr__next[w] = curr_wr->next;
        curr_wr->next = (struct ibv_send_wr *) guest_to_host((size_t) curr_wr->next);
        // Next pointer and SGE array
        wr__next[w] = curr_wr->next;
        curr_wr->next = (struct ibv_send_wr *) guest_to_host((size_t) curr_wr->next);

        for (int s = 0; s < curr_wr->num_sge; s++) {
            wr__sg_list__addr[w][s] = curr_wr->sg_list[s].addr;
            curr_wr->sg_list[s].addr = (uint64_t) guest_to_host((size_t) curr_wr->sg_list[s].addr);
        }
        for (int s = 0; s < curr_wr->num_sge; s++) {
            wr__sg_list__addr[w][s] = curr_wr->sg_list[s].addr;
            curr_wr->sg_list[s].addr = (uint64_t) guest_to_host((size_t) curr_wr->sg_list[s].addr);
        }

        wr__sg_list[w] = curr_wr->sg_list;
        curr_wr->sg_list = (struct ibv_sge *) guest_to_host((size_t) curr_wr->sg_list);
        wr__sg_list[w] = curr_wr->sg_list;
        curr_wr->sg_list = (struct ibv_sge *) guest_to_host((size_t) curr_wr->sg_list);

        curr_wr = wr__next[w];
    }

        curr_wr = wr__next[w];
    }
"""

send_wrs_revert = \
"""
    if (*bad_wr && *bad_wr == uhyve_args.wr) {
        *bad_wr = wr;
    }
post_send_revert = \
"""    if (*bad_wr && *bad_wr == uhyve_args.wr) {
        *bad_wr = wr;
    }

    curr_wr = wr;
    for (int w = 0; w < num_wrs; w++) {
        is_bind_mw = curr_wr->opcode == IBV_WR_BIND_MW;
        is_tso = curr_wr->opcode == IBV_WR_TSO;
    curr_wr = wr;
    for (int w = 0; w < num_wrs; w++) {
        is_bind_mw = curr_wr->opcode == IBV_WR_BIND_MW;
        is_tso = curr_wr->opcode == IBV_WR_TSO;

        // union: bind_mw and tso
        if (is_bind_mw) {
            curr_wr->bind_mw.bind_info.addr = wr__bind_mw__bind_info__addr[w];
        } else if (is_tso) {
            curr_wr->tso.hdr = wr__tso__hdr[w];
        }
        // union: bind_mw and tso
        if (is_bind_mw) {
            curr_wr->bind_mw.bind_info.addr = wr__bind_mw__bind_info__addr[w];
        } else if (is_tso) {
            curr_wr->tso.hdr = wr__tso__hdr[w];
        }

        // Bad request
        if (*bad_wr && *bad_wr == curr_wr->next) {
            *bad_wr = wr__next[w];
        }
        // Bad request
        if (*bad_wr && *bad_wr == curr_wr->next) {
            *bad_wr = wr__next[w];
        }

        // Next pointer and SGE array
        curr_wr->next = wr__next[w];
        curr_wr->sg_list = wr__sg_list[w];
        for (int s = 0; s < curr_wr->num_sge; s++) {
            curr_wr->sg_list[s].addr = wr__sg_list__addr[w][s];
        }
        // Next pointer and SGE array
        curr_wr->next = wr__next[w];
        curr_wr->sg_list = wr__sg_list[w];
        for (int s = 0; s < curr_wr->num_sge; s++) {
            curr_wr->sg_list[s].addr = wr__sg_list__addr[w][s];
        }

        curr_wr = curr_wr->next;
    }

        curr_wr = curr_wr->next;
    }
"""

#
# ibv_create_rwq_ind_table
#

create_rwq_ind_table_convert = \
"""    // TODO: Entries of the list should be universal references residing in the IB memory pool.
    // Confirm this.
    struct ibv_wq ** init_attr__ind_tbl = init_attr->ind_tbl;
    init_attr->ind_tbl = (struct ibv_wq **) guest_to_host((size_t) init_attr->ind_tbl);

"""

create_rwq_ind_table_revert = \
"""    init_attr->ind_tbl = init_attr__ind_tbl;

"""

def generate(name, part):
    if name is "ibv_post_send":
        if part is BACKUP_AND_CONVERT:
            return send_wrs_backup_and_convert
    if name == "ibv_post_send":
        if part == CONVERT:
            return post_send_convert
        else:
            return send_wrs_revert
            return post_send_revert

    if name is "ibv_post_recv" or name is "ibv_post_srq_recv" or name is "ibv_post_wq_recv":
        if part is BACKUP_AND_CONVERT:
            return recv_wrs_backup_and_convert
    elif name == "ibv_post_wq_recv" or name == "ibv_post_recv" or name == "ibv_post_srq_recv":
        if part == CONVERT:
            return post_recv_convert
        else:
            return recv_wrs_revert
            return post_recv_revert

    elif name == "ibv_create_rwq_ind_table":
        if part == CONVERT:
            return create_rwq_ind_table_convert
        else:
            return create_rwq_ind_table_revert

@@ -1,5 +1,5 @@
#!/usr/bin/env python
"""Copyright (c) 2017, Annika Wierichs, RWTH Aachen University
"""Copyright (c) 2018, Annika Wierichs, RWTH Aachen University

All rights reserved.
Redistribution and use in source and binary forms, with or without

@@ -27,7 +27,10 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
TODO, docs
"""

# TODO: Add ibv_resolve_eth_l2_from_gid function back in. Not linking right now.
import custom_snippets

# TODO: ibv_resolve_eth_l2_from_gid function does not work.
# TODO: ibv_open_xrcd function might not work. Confirm.


# Path of the input file containing function prototypes.

@@ -46,11 +49,11 @@ PORT_NUMBER_START = 0x610

restricted_resources = ["struct ibv_send_wr",
                        "struct ibv_recv_wr",
                        "struct ibv_sge",
                        "struct ibv_xrcd_init_attr",
                        "struct ibv_rwq_ind_table_init_attr",

                        # Deep resources that are not used as parameters to any functions
                        # Deep resources that are not used as direct function parameters
                        "struct ibv_sge",
                        "struct ibv_mw_bind_info",
                        "struct ibv_rx_hash_conf"]

@@ -334,24 +337,14 @@ def generate_hermit_function_definition(fnc):
        code += "\tuhyve_args.{0} = {0};\n".format(param.name)
    code += "\n"

    if fnc.is_restricted():
        code += snippet.generate(fnc.name, snippet.BACKUP_AND_CONVERT)

    # for param in fnc.params:
    # if param.is_restricted():
    # code += "\t// TODO: Take care of pointer conversions in " + param.name + ".\n"
    # code += "\n" if param is fnc.params[-1] else ""
    if fnc.name in custom_snippets.supported_functions:
        code += custom_snippets.generate(fnc.name, custom_snippets.CONVERT)

    code += ("\tuhyve_send({0}, (unsigned) virt_to_phys((size_t) &uhyve_args));\n\n"
             .format(fnc.port_name))

    # for param in fnc.params:
    # if param.is_restricted():
    # code += "\t// TODO: Take care of reverse pointer conversions in " + param.name + ".\n"
    # code += "\n" if param is fnc.params[-1] else ""

    if fnc.is_restricted():
        code += snippet.generate(fnc.name, snippet.REVERT)
    if fnc.name in custom_snippets.supported_functions:
        code += custom_snippets.generate(fnc.name, custom_snippets.REVERT)

    if not fnc.ret.is_void():
        code += "\treturn uhyve_args.ret;\n"
|
Loading…
Add table
Reference in a new issue