1
0
Fork 0
mirror of https://github.com/hermitcore/libhermit.git synced 2025-03-09 00:00:03 +01:00

Replace asprintf with snprintf (not available in HermitCore's libc) and perform some cleanup of commented-out code.

This commit is contained in:
Annika Wierichs 2018-01-24 16:15:42 +01:00
parent 647b8e758d
commit 92e22c28ac
4 changed files with 4 additions and 982 deletions

View file

@ -393,32 +393,3 @@ oom:
LOG_ERROR("BUG: Failed to init mm!\n");
while(1) {HALT; }
}
/* void * ib_memory_init(void) */
/* { */
/* size_t phyaddr, viraddr, bits; */
/* int err; */
/* phyaddr = (size_t) &kernel_end - IB_MEMORY_SIZE; */
/* bits = PG_RW|PG_GLOBAL|PG_NX; */
/* viraddr = vma_alloc(IB_MEMORY_NPAGES * PAGE_SIZE, VMA_READ|VMA_WRITE|VMA_CACHEABLE); */
/* if (BUILTIN_EXPECT(!viraddr, 0)) { */
/* LOG_INFO("BUILTIN_EXPECT failed: ib_memory_init 1\n"); */
/* return NULL; */
/* } */
/* LOG_INFO("ib_memory_init, size: %lu\n", IB_MEMORY_SIZE); */
/* LOG_INFO("\tGuest Phys Start: %p\tEnd: %p\n", (uint8_t *) phyaddr, (uint8_t *) &kernel_end); */
/* [> LOG_INFO("\tHost Virt Start: %p\tEnd: %p\n", <] */
/* [> phyaddr + host_kernel_start, (size_t) &kernel_end + host_kernel_start); <] */
/* err = page_map(viraddr, phyaddr, IB_MEMORY_NPAGES, bits); */
/* if (BUILTIN_EXPECT(err, 0)) { */
/* LOG_INFO("BUILTIN_EXPECT failed: ib_memory_init 2\n"); */
/* vma_free(viraddr, viraddr + IB_MEMORY_NPAGES*PAGE_SIZE); */
/* return NULL; */
/* } */
/* LOG_INFO("ib_memory_init finished\n"); */
/* return (void *) viraddr; */
/* } */

View file

@ -47,10 +47,6 @@
#include <pthread.h>
#endif
//#include <stdint.h> // not needed for data structures
//#include <linux/types.h>
#ifdef __cplusplus
# define BEGIN_C_DECLS extern "C" {
@ -114,11 +110,8 @@ struct timespec {
__kernel_time_t tv_sec; /* seconds */
long tv_nsec; /* nanoseconds */
};
#endif
union ibv_gid {
uint8_t raw[16];
struct {
@ -654,33 +647,6 @@ enum ibv_rate {
IBV_RATE_300_GBPS = 18
};
/**
* ibv_rate_to_mult - Convert the IB rate enum to a multiple of the
* base rate of 2.5 Gbit/sec. For example, IBV_RATE_5_GBPS will be
* converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec.
* @rate: rate to convert.
*/
//int __attribute_const ibv_rate_to_mult(enum ibv_rate rate);
/**
* mult_to_ibv_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate enum.
* @mult: multiple to convert.
*/
//enum ibv_rate __attribute_const mult_to_ibv_rate(int mult);
/**
* ibv_rate_to_mbps - Convert the IB rate enum to Mbit/sec.
* For example, IBV_RATE_5_GBPS will return the value 5000.
* @rate: rate to convert.
*/
//int __attribute_const ibv_rate_to_mbps(enum ibv_rate rate);
/**
* mbps_to_ibv_rate - Convert a Mbit/sec value to an IB rate enum.
* @mbps: value to convert.
*/
//enum ibv_rate __attribute_const mbps_to_ibv_rate(int mbps) __attribute_const;
struct ibv_ah_attr {
struct ibv_global_route grh;
uint16_t dlid;
@ -1183,108 +1149,6 @@ struct ibv_cq_ex {
uint32_t (*read_flow_tag)(struct ibv_cq_ex *current);
};
//static inline struct ibv_cq *ibv_cq_ex_to_cq(struct ibv_cq_ex *cq)
//{
//return (struct ibv_cq *)cq;
//}
//static inline int ibv_start_poll(struct ibv_cq_ex *cq,
//struct ibv_poll_cq_attr *attr)
//{
//return cq->start_poll(cq, attr);
//}
//static inline int ibv_next_poll(struct ibv_cq_ex *cq)
//{
//return cq->next_poll(cq);
//}
//static inline void ibv_end_poll(struct ibv_cq_ex *cq)
//{
//cq->end_poll(cq);
//}
//static inline enum ibv_wc_opcode ibv_wc_read_opcode(struct ibv_cq_ex *cq)
//{
//return cq->read_opcode(cq);
//}
//static inline uint32_t ibv_wc_read_vendor_err(struct ibv_cq_ex *cq)
//{
//return cq->read_vendor_err(cq);
//}
//static inline uint32_t ibv_wc_read_byte_len(struct ibv_cq_ex *cq)
//{
//return cq->read_byte_len(cq);
//}
//static inline __be32 ibv_wc_read_imm_data(struct ibv_cq_ex *cq)
//{
//return cq->read_imm_data(cq);
//}
//static inline uint32_t ibv_wc_read_invalidated_rkey(struct ibv_cq_ex *cq)
//{
//#ifdef __CHECKER__
//return (__attribute__((force)) uint32_t)cq->read_imm_data(cq);
//#else
//return cq->read_imm_data(cq);
//#endif
//}
//static inline uint32_t ibv_wc_read_qp_num(struct ibv_cq_ex *cq)
//{
//return cq->read_qp_num(cq);
//}
//static inline uint32_t ibv_wc_read_src_qp(struct ibv_cq_ex *cq)
//{
//return cq->read_src_qp(cq);
//}
//static inline int ibv_wc_read_wc_flags(struct ibv_cq_ex *cq)
//{
//return cq->read_wc_flags(cq);
//}
//static inline uint32_t ibv_wc_read_slid(struct ibv_cq_ex *cq)
//{
//return cq->read_slid(cq);
//}
//static inline uint8_t ibv_wc_read_sl(struct ibv_cq_ex *cq)
//{
//return cq->read_sl(cq);
//}
//static inline uint8_t ibv_wc_read_dlid_path_bits(struct ibv_cq_ex *cq)
//{
//return cq->read_dlid_path_bits(cq);
//}
//static inline uint64_t ibv_wc_read_completion_ts(struct ibv_cq_ex *cq)
//{
//return cq->read_completion_ts(cq);
//}
//static inline uint16_t ibv_wc_read_cvlan(struct ibv_cq_ex *cq)
//{
//return cq->read_cvlan(cq);
//}
//static inline uint32_t ibv_wc_read_flow_tag(struct ibv_cq_ex *cq)
//{
//return cq->read_flow_tag(cq);
//}
//static inline int ibv_post_wq_recv(struct ibv_wq *wq,
//struct ibv_recv_wr *recv_wr,
//struct ibv_recv_wr **bad_recv_wr)
//{
//return wq->post_recv(wq, recv_wr, bad_recv_wr);
//}
struct ibv_ah {
struct ibv_context *context;
struct ibv_pd *pd;
@ -1644,820 +1508,10 @@ struct verbs_context {
struct ibv_context context; /* Must be last field in the struct */
};
//static inline struct verbs_context *verbs_get_ctx(struct ibv_context *ctx)
//{
//return (ctx->abi_compat != __VERBS_ABI_IS_EXTENDED) ?
//NULL : container_of(ctx, struct verbs_context, context);
//}
/* #define verbs_get_ctx_op(ctx, op) ({ \
struct verbs_context *__vctx = verbs_get_ctx(ctx); \
(!__vctx || (__vctx->sz < sizeof(*__vctx) - offsetof(struct verbs_context, op)) || \
!__vctx->op) ? NULL : __vctx; })
#define verbs_set_ctx_op(_vctx, op, ptr) ({ \
struct verbs_context *vctx = _vctx; \
if (vctx && (vctx->sz >= sizeof(*vctx) - offsetof(struct verbs_context, op))) \
vctx->op = ptr; }) */
///**
// * ibv_get_device_list - Get list of IB devices currently available
// * @num_devices: optional. if non-NULL, set to the number of devices
// * returned in the array.
// *
// * Return a NULL-terminated array of IB devices. The array can be
// * released with ibv_free_device_list().
// */
//struct ibv_device **ibv_get_device_list(int *num_devices);
//
///**
// * ibv_free_device_list - Free list from ibv_get_device_list()
// *
// * Free an array of devices returned from ibv_get_device_list(). Once
// * the array is freed, pointers to devices that were not opened with
// * ibv_open_device() are no longer valid. Client code must open all
// * devices it intends to use before calling ibv_free_device_list().
// */
//void ibv_free_device_list(struct ibv_device **list);
//
///**
// * ibv_get_device_name - Return kernel device name
// */
//const char *ibv_get_device_name(struct ibv_device *device);
//
///**
// * ibv_get_device_guid - Return device's node GUID
// */
//__be64 ibv_get_device_guid(struct ibv_device *device);
//
///**
// * ibv_open_device - Initialize device for use
// */
//struct ibv_context *ibv_open_device(struct ibv_device *device);
//
///**
// * ibv_close_device - Release device
// */
//int ibv_close_device(struct ibv_context *context);
//
///**
// * ibv_get_async_event - Get next async event
// * @event: Pointer to use to return async event
// *
// * All async events returned by ibv_get_async_event() must eventually
// * be acknowledged with ibv_ack_async_event().
// */
//int ibv_get_async_event(struct ibv_context *context,
// struct ibv_async_event *event);
//
///**
// * ibv_ack_async_event - Acknowledge an async event
// * @event: Event to be acknowledged.
// *
// * All async events which are returned by ibv_get_async_event() must
// * be acknowledged. To avoid races, destroying an object (CQ, SRQ or
// * QP) will wait for all affiliated events to be acknowledged, so
// * there should be a one-to-one correspondence between acks and
// * successful gets.
// */
//void ibv_ack_async_event(struct ibv_async_event *event);
//
///**
// * ibv_query_device - Get device properties
// */
//int ibv_query_device(struct ibv_context *context,
// struct ibv_device_attr *device_attr);
/**
* ibv_query_port - Get port properties
*/
// int ibv_query_port(struct ibv_context *context, uint8_t port_num,
// struct ibv_port_attr *port_attr);
// static inline int ___ibv_query_port(struct ibv_context *context,
// uint8_t port_num,
// struct ibv_port_attr *port_attr)
// {
// [> For compatibility when running with old libibverbs <]
// port_attr->link_layer = IBV_LINK_LAYER_UNSPECIFIED;
// port_attr->reserved = 0;
// return ibv_query_port(context, port_num, port_attr);
// }
/* #define ibv_query_port(context, port_num, port_attr) \
___ibv_query_port(context, port_num, port_attr) */
///**
// * ibv_query_gid - Get a GID table entry
// */
//int ibv_query_gid(struct ibv_context *context, uint8_t port_num,
// int index, union ibv_gid *gid);
//
///**
// * ibv_query_pkey - Get a P_Key table entry
// */
//int ibv_query_pkey(struct ibv_context *context, uint8_t port_num,
// int index, __be16 *pkey);
//
///**
// * ibv_alloc_pd - Allocate a protection domain
// */
//struct ibv_pd *ibv_alloc_pd(struct ibv_context *context);
//
///**
// * ibv_dealloc_pd - Free a protection domain
// */
//int ibv_dealloc_pd(struct ibv_pd *pd);
//
//static inline struct ibv_flow *ibv_create_flow(struct ibv_qp *qp,
// struct ibv_flow_attr *flow)
//{
// struct verbs_context *vctx = verbs_get_ctx_op(qp->context,
// ibv_create_flow);
// if (!vctx || !vctx->ibv_create_flow) {
// errno = ENOSYS;
// return NULL;
// }
//
// return vctx->ibv_create_flow(qp, flow);
//}
//
//static inline int ibv_destroy_flow(struct ibv_flow *flow_id)
//{
// struct verbs_context *vctx = verbs_get_ctx_op(flow_id->context,
// ibv_destroy_flow);
// if (!vctx || !vctx->ibv_destroy_flow)
// return -ENOSYS;
// return vctx->ibv_destroy_flow(flow_id);
//}
//
///**
// * ibv_open_xrcd - Open an extended connection domain
// */
//static inline struct ibv_xrcd *
//ibv_open_xrcd(struct ibv_context *context, struct ibv_xrcd_init_attr *xrcd_init_attr)
//{
// struct verbs_context *vctx = verbs_get_ctx_op(context, open_xrcd);
// if (!vctx) {
// errno = ENOSYS;
// return NULL;
// }
// return vctx->open_xrcd(context, xrcd_init_attr);
//}
//
///**
// * ibv_close_xrcd - Close an extended connection domain
// */
//static inline int ibv_close_xrcd(struct ibv_xrcd *xrcd)
//{
// struct verbs_context *vctx = verbs_get_ctx(xrcd->context);
// return vctx->close_xrcd(xrcd);
//}
//
///**
// * ibv_reg_mr - Register a memory region
// */
//struct ibv_mr *ibv_reg_mr(struct ibv_pd *pd, void *addr,
// size_t length, int access);
//
//
//enum ibv_rereg_mr_err_code {
// /* Old MR is valid, invalid input */
// IBV_REREG_MR_ERR_INPUT = -1,
// /* Old MR is valid, failed via don't fork on new address range */
// IBV_REREG_MR_ERR_DONT_FORK_NEW = -2,
// /* New MR is valid, failed via do fork on old address range */
// IBV_REREG_MR_ERR_DO_FORK_OLD = -3,
// /* MR shouldn't be used, command error */
// IBV_REREG_MR_ERR_CMD = -4,
// /* MR shouldn't be used, command error, invalid fork state on new address range */
// IBV_REREG_MR_ERR_CMD_AND_DO_FORK_NEW = -5,
//};
//
///**
// * ibv_rereg_mr - Re-Register a memory region
// */
//int ibv_rereg_mr(struct ibv_mr *mr, int flags,
// struct ibv_pd *pd, void *addr,
// size_t length, int access);
///**
// * ibv_dereg_mr - Deregister a memory region
// */
//int ibv_dereg_mr(struct ibv_mr *mr);
//
///**
// * ibv_alloc_mw - Allocate a memory window
// */
//static inline struct ibv_mw *ibv_alloc_mw(struct ibv_pd *pd,
// enum ibv_mw_type type)
//{
// struct ibv_mw *mw;
//
// if (!pd->context->ops.alloc_mw) {
// errno = ENOSYS;
// return NULL;
// }
//
// mw = pd->context->ops.alloc_mw(pd, type);
// return mw;
//}
//
///**
// * ibv_dealloc_mw - Free a memory window
// */
//static inline int ibv_dealloc_mw(struct ibv_mw *mw)
//{
// return mw->context->ops.dealloc_mw(mw);
//}
//
///**
// * ibv_inc_rkey - Increase the 8 lsb in the given rkey
// */
//static inline uint32_t ibv_inc_rkey(uint32_t rkey)
//{
// const uint32_t mask = 0x000000ff;
// uint8_t newtag = (uint8_t)((rkey + 1) & mask);
//
// return (rkey & ~mask) | newtag;
//}
//
///**
// * ibv_bind_mw - Bind a memory window to a region
// */
//static inline int ibv_bind_mw(struct ibv_qp *qp, struct ibv_mw *mw,
// struct ibv_mw_bind *mw_bind)
//{
// if (mw->type != IBV_MW_TYPE_1)
// return EINVAL;
//
// return mw->context->ops.bind_mw(qp, mw, mw_bind);
//}
//
///**
// * ibv_create_comp_channel - Create a completion event channel
// */
//struct ibv_comp_channel *ibv_create_comp_channel(struct ibv_context *context);
//
///**
// * ibv_destroy_comp_channel - Destroy a completion event channel
// */
//int ibv_destroy_comp_channel(struct ibv_comp_channel *channel);
//
///**
// * ibv_create_cq - Create a completion queue
// * @context - Context CQ will be attached to
// * @cqe - Minimum number of entries required for CQ
// * @cq_context - Consumer-supplied context returned for completion events
// * @channel - Completion channel where completion events will be queued.
// * May be NULL if completion events will not be used.
// * @comp_vector - Completion vector used to signal completion events.
// * Must be >= 0 and < context->num_comp_vectors.
// */
//struct ibv_cq *ibv_create_cq(struct ibv_context *context, int cqe,
// void *cq_context,
// struct ibv_comp_channel *channel,
// int comp_vector);
//
///**
// * ibv_create_cq_ex - Create a completion queue
// * @context - Context CQ will be attached to
// * @cq_attr - Attributes to create the CQ with
// */
//static inline
//struct ibv_cq_ex *ibv_create_cq_ex(struct ibv_context *context,
// struct ibv_cq_init_attr_ex *cq_attr)
//{
// struct verbs_context *vctx = verbs_get_ctx_op(context, create_cq_ex);
//
// if (!vctx) {
// errno = ENOSYS;
// return NULL;
// }
//
// if (cq_attr->comp_mask & ~(IBV_CQ_INIT_ATTR_MASK_RESERVED - 1)) {
// errno = EINVAL;
// return NULL;
// }
//
// return vctx->create_cq_ex(context, cq_attr);
//}
//
///**
// * ibv_resize_cq - Modifies the capacity of the CQ.
// * @cq: The CQ to resize.
// * @cqe: The minimum size of the CQ.
// *
// * Users can examine the cq structure to determine the actual CQ size.
// */
//int ibv_resize_cq(struct ibv_cq *cq, int cqe);
//
///**
// * ibv_destroy_cq - Destroy a completion queue
// */
//int ibv_destroy_cq(struct ibv_cq *cq);
//
///**
// * ibv_get_cq_event - Read next CQ event
// * @channel: Channel to get next event from.
// * @cq: Used to return pointer to CQ.
// * @cq_context: Used to return consumer-supplied CQ context.
// *
// * All completion events returned by ibv_get_cq_event() must
// * eventually be acknowledged with ibv_ack_cq_events().
// */
//int ibv_get_cq_event(struct ibv_comp_channel *channel,
// struct ibv_cq **cq, void **cq_context);
//
///**
// * ibv_ack_cq_events - Acknowledge CQ completion events
// * @cq: CQ to acknowledge events for
// * @nevents: Number of events to acknowledge.
// *
// * All completion events which are returned by ibv_get_cq_event() must
// * be acknowledged. To avoid races, ibv_destroy_cq() will wait for
// * all completion events to be acknowledged, so there should be a
// * one-to-one correspondence between acks and successful gets. An
// * application may accumulate multiple completion events and
// * acknowledge them in a single call to ibv_ack_cq_events() by passing
// * the number of events to ack in @nevents.
// */
//void ibv_ack_cq_events(struct ibv_cq *cq, unsigned int nevents);
//
///**
// * ibv_poll_cq - Poll a CQ for work completions
// * @cq:the CQ being polled
// * @num_entries:maximum number of completions to return
// * @wc:array of at least @num_entries of &struct ibv_wc where completions
// * will be returned
// *
// * Poll a CQ for (possibly multiple) completions. If the return value
// * is < 0, an error occurred. If the return value is >= 0, it is the
// * number of completions returned. If the return value is
// * non-negative and strictly less than num_entries, then the CQ was
// * emptied.
// */
//static inline int ibv_poll_cq(struct ibv_cq *cq, int num_entries, struct ibv_wc *wc)
//{
// return cq->context->ops.poll_cq(cq, num_entries, wc);
//}
//
///**
// * ibv_req_notify_cq - Request completion notification on a CQ. An
// * event will be added to the completion channel associated with the
// * CQ when an entry is added to the CQ.
// * @cq: The completion queue to request notification for.
// * @solicited_only: If non-zero, an event will be generated only for
// * the next solicited CQ entry. If zero, any CQ entry, solicited or
// * not, will generate an event.
// */
//static inline int ibv_req_notify_cq(struct ibv_cq *cq, int solicited_only)
//{
// return cq->context->ops.req_notify_cq(cq, solicited_only);
//}
//
///**
// * ibv_create_srq - Creates a SRQ associated with the specified protection
// * domain.
// * @pd: The protection domain associated with the SRQ.
// * @srq_init_attr: A list of initial attributes required to create the SRQ.
// *
// * srq_attr->max_wr and srq_attr->max_sge are read the determine the
// * requested size of the SRQ, and set to the actual values allocated
// * on return. If ibv_create_srq() succeeds, then max_wr and max_sge
// * will always be at least as large as the requested values.
// */
//struct ibv_srq *ibv_create_srq(struct ibv_pd *pd,
// struct ibv_srq_init_attr *srq_init_attr);
//
//static inline struct ibv_srq *
//ibv_create_srq_ex(struct ibv_context *context,
// struct ibv_srq_init_attr_ex *srq_init_attr_ex)
//{
// struct verbs_context *vctx;
// uint32_t mask = srq_init_attr_ex->comp_mask;
//
// if (!(mask & ~(IBV_SRQ_INIT_ATTR_PD | IBV_SRQ_INIT_ATTR_TYPE)) &&
// (mask & IBV_SRQ_INIT_ATTR_PD) &&
// (!(mask & IBV_SRQ_INIT_ATTR_TYPE) ||
// (srq_init_attr_ex->srq_type == IBV_SRQT_BASIC)))
// return ibv_create_srq(srq_init_attr_ex->pd,
// (struct ibv_srq_init_attr *)srq_init_attr_ex);
//
// vctx = verbs_get_ctx_op(context, create_srq_ex);
// if (!vctx) {
// errno = ENOSYS;
// return NULL;
// }
// return vctx->create_srq_ex(context, srq_init_attr_ex);
//}
//
///**
// * ibv_modify_srq - Modifies the attributes for the specified SRQ.
// * @srq: The SRQ to modify.
// * @srq_attr: On input, specifies the SRQ attributes to modify. On output,
// * the current values of selected SRQ attributes are returned.
// * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ
// * are being modified.
// *
// * The mask may contain IBV_SRQ_MAX_WR to resize the SRQ and/or
// * IBV_SRQ_LIMIT to set the SRQ's limit and request notification when
// * the number of receives queued drops below the limit.
// */
//int ibv_modify_srq(struct ibv_srq *srq,
// struct ibv_srq_attr *srq_attr,
// int srq_attr_mask);
//
///**
// * ibv_query_srq - Returns the attribute list and current values for the
// * specified SRQ.
// * @srq: The SRQ to query.
// * @srq_attr: The attributes of the specified SRQ.
// */
//int ibv_query_srq(struct ibv_srq *srq, struct ibv_srq_attr *srq_attr);
//
//static inline int ibv_get_srq_num(struct ibv_srq *srq, uint32_t *srq_num)
//{
// struct verbs_context *vctx = verbs_get_ctx_op(srq->context, get_srq_num);
//
// if (!vctx)
// return ENOSYS;
//
// return vctx->get_srq_num(srq, srq_num);
//}
//
///**
// * ibv_destroy_srq - Destroys the specified SRQ.
// * @srq: The SRQ to destroy.
// */
//int ibv_destroy_srq(struct ibv_srq *srq);
//
///**
// * ibv_post_srq_recv - Posts a list of work requests to the specified SRQ.
// * @srq: The SRQ to post the work request on.
// * @recv_wr: A list of work requests to post on the receive queue.
// * @bad_recv_wr: On an immediate failure, this parameter will reference
// * the work request that failed to be posted on the QP.
// */
//static inline int ibv_post_srq_recv(struct ibv_srq *srq,
// struct ibv_recv_wr *recv_wr,
// struct ibv_recv_wr **bad_recv_wr)
//{
// return srq->context->ops.post_srq_recv(srq, recv_wr, bad_recv_wr);
//}
//
///**
// * ibv_create_qp - Create a queue pair.
// */
//struct ibv_qp *ibv_create_qp(struct ibv_pd *pd,
// struct ibv_qp_init_attr *qp_init_attr);
//
//static inline struct ibv_qp *
//ibv_create_qp_ex(struct ibv_context *context, struct ibv_qp_init_attr_ex *qp_init_attr_ex)
//{
// struct verbs_context *vctx;
// uint32_t mask = qp_init_attr_ex->comp_mask;
//
// if (mask == IBV_QP_INIT_ATTR_PD)
// return ibv_create_qp(qp_init_attr_ex->pd,
// (struct ibv_qp_init_attr *)qp_init_attr_ex);
//
// vctx = verbs_get_ctx_op(context, create_qp_ex);
// if (!vctx) {
// errno = ENOSYS;
// return NULL;
// }
// return vctx->create_qp_ex(context, qp_init_attr_ex);
//}
//
///**
// * ibv_query_rt_values_ex - Get current real time @values of a device.
// * @values - in/out - defines the attributes we need to query/queried.
// * (Or's bits of enum ibv_values_mask on values->comp_mask field)
// */
//static inline int
//ibv_query_rt_values_ex(struct ibv_context *context,
// struct ibv_values_ex *values)
//{
// struct verbs_context *vctx;
//
// vctx = verbs_get_ctx_op(context, query_rt_values);
// if (!vctx)
// return ENOSYS;
//
// if (values->comp_mask & ~(IBV_VALUES_MASK_RESERVED - 1))
// return EINVAL;
//
// return vctx->query_rt_values(context, values);
//}
//
///**
// * ibv_query_device_ex - Get extended device properties
// */
//static inline int
//ibv_query_device_ex(struct ibv_context *context,
// const struct ibv_query_device_ex_input *input,
// struct ibv_device_attr_ex *attr)
//{
// struct verbs_context *vctx;
// int ret;
//
// vctx = verbs_get_ctx_op(context, query_device_ex);
// if (!vctx)
// goto legacy;
//
// ret = vctx->query_device_ex(context, input, attr, sizeof(*attr));
// if (ret == ENOSYS)
// goto legacy;
//
// return ret;
//
//legacy:
// memset(attr, 0, sizeof(*attr));
// ret = ibv_query_device(context, &attr->orig_attr);
//
// return ret;
//}
//
///**
// * ibv_open_qp - Open a shareable queue pair.
// */
//static inline struct ibv_qp *
//ibv_open_qp(struct ibv_context *context, struct ibv_qp_open_attr *qp_open_attr)
//{
// struct verbs_context *vctx = verbs_get_ctx_op(context, open_qp);
// if (!vctx) {
// errno = ENOSYS;
// return NULL;
// }
// return vctx->open_qp(context, qp_open_attr);
//}
//
///**
// * ibv_modify_qp - Modify a queue pair.
// */
//int ibv_modify_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr,
// int attr_mask);
//
///**
// * ibv_query_qp - Returns the attribute list and current values for the
// * specified QP.
// * @qp: The QP to query.
// * @attr: The attributes of the specified QP.
// * @attr_mask: A bit-mask used to select specific attributes to query.
// * @init_attr: Additional attributes of the selected QP.
// *
// * The qp_attr_mask may be used to limit the query to gathering only the
// * selected attributes.
// */
//int ibv_query_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr,
// int attr_mask,
// struct ibv_qp_init_attr *init_attr);
//
///**
// * ibv_destroy_qp - Destroy a queue pair.
// */
//int ibv_destroy_qp(struct ibv_qp *qp);
//
///*
// * ibv_create_wq - Creates a WQ associated with the specified protection
// * domain.
// * @context: ibv_context.
// * @wq_init_attr: A list of initial attributes required to create the
// * WQ. If WQ creation succeeds, then the attributes are updated to
// * the actual capabilities of the created WQ.
// *
// * wq_init_attr->max_wr and wq_init_attr->max_sge determine
// * the requested size of the WQ, and set to the actual values allocated
// * on return.
// * If ibv_create_wq() succeeds, then max_wr and max_sge will always be
// * at least as large as the requested values.
// *
// * Return Value
// * ibv_create_wq() returns a pointer to the created WQ, or NULL if the request
// * fails.
// */
//static inline struct ibv_wq *ibv_create_wq(struct ibv_context *context,
// struct ibv_wq_init_attr *wq_init_attr)
//{
// struct verbs_context *vctx = verbs_get_ctx_op(context, create_wq);
// struct ibv_wq *wq;
//
// if (!vctx) {
// errno = ENOSYS;
// return NULL;
// }
//
// wq = vctx->create_wq(context, wq_init_attr);
// if (wq) {
// wq->events_completed = 0;
// pthread_mutex_init(&wq->mutex, NULL);
// pthread_cond_init(&wq->cond, NULL);
// }
//
// return wq;
//}
//
///*
// * ibv_modify_wq - Modifies the attributes for the specified WQ.
// * @wq: The WQ to modify.
// * @wq_attr: On input, specifies the WQ attributes to modify.
// * wq_attr->attr_mask: A bit-mask used to specify which attributes of the WQ
// * are being modified.
// * On output, the current values of selected WQ attributes are returned.
// *
// * Return Value
// * ibv_modify_wq() returns 0 on success, or the value of errno
// * on failure (which indicates the failure reason).
// *
//*/
//static inline int ibv_modify_wq(struct ibv_wq *wq, struct ibv_wq_attr *wq_attr)
//{
// struct verbs_context *vctx = verbs_get_ctx_op(wq->context, modify_wq);
//
// if (!vctx)
// return ENOSYS;
//
// return vctx->modify_wq(wq, wq_attr);
//}
//
///*
// * ibv_destroy_wq - Destroys the specified WQ.
// * @ibv_wq: The WQ to destroy.
// * Return Value
// * ibv_destroy_wq() returns 0 on success, or the value of errno
// * on failure (which indicates the failure reason).
//*/
//static inline int ibv_destroy_wq(struct ibv_wq *wq)
//{
// struct verbs_context *vctx;
//
// vctx = verbs_get_ctx_op(wq->context, destroy_wq);
// if (!vctx)
// return ENOSYS;
//
// return vctx->destroy_wq(wq);
//}
//
///*
// * ibv_create_rwq_ind_table - Creates a receive work queue Indirection Table
// * @context: ibv_context.
// * @init_attr: A list of initial attributes required to create the Indirection Table.
// * Return Value
// * ibv_create_rwq_ind_table returns a pointer to the created
// * Indirection Table, or NULL if the request fails.
// */
//static inline struct ibv_rwq_ind_table *ibv_create_rwq_ind_table(struct ibv_context *context,
// struct ibv_rwq_ind_table_init_attr *init_attr)
//{
// struct verbs_context *vctx;
//
// vctx = verbs_get_ctx_op(context, create_rwq_ind_table);
// if (!vctx) {
// errno = ENOSYS;
// return NULL;
// }
//
// return vctx->create_rwq_ind_table(context, init_attr);
//}
//
///*
// * ibv_destroy_rwq_ind_table - Destroys the specified Indirection Table.
// * @rwq_ind_table: The Indirection Table to destroy.
// * Return Value
// * ibv_destroy_rwq_ind_table() returns 0 on success, or the value of errno
// * on failure (which indicates the failure reason).
//*/
//static inline int ibv_destroy_rwq_ind_table(struct ibv_rwq_ind_table *rwq_ind_table)
//{
// struct verbs_context *vctx;
//
// vctx = verbs_get_ctx_op(rwq_ind_table->context, destroy_rwq_ind_table);
// if (!vctx)
// return ENOSYS;
//
// return vctx->destroy_rwq_ind_table(rwq_ind_table);
//}
//
///**
// * ibv_post_send - Post a list of work requests to a send queue.
// *
// * If IBV_SEND_INLINE flag is set, the data buffers can be reused
// * immediately after the call returns.
// */
//static inline int ibv_post_send(struct ibv_qp *qp, struct ibv_send_wr *wr,
// struct ibv_send_wr **bad_wr)
//{
// return qp->context->ops.post_send(qp, wr, bad_wr);
//}
//
///**
// * ibv_post_recv - Post a list of work requests to a receive queue.
// */
//static inline int ibv_post_recv(struct ibv_qp *qp, struct ibv_recv_wr *wr,
// struct ibv_recv_wr **bad_wr)
//{
// return qp->context->ops.post_recv(qp, wr, bad_wr);
//}
//
///**
// * ibv_create_ah - Create an address handle.
// */
//struct ibv_ah *ibv_create_ah(struct ibv_pd *pd, struct ibv_ah_attr *attr);
//
///**
// * ibv_init_ah_from_wc - Initializes address handle attributes from a
// * work completion.
// * @context: Device context on which the received message arrived.
// * @port_num: Port on which the received message arrived.
// * @wc: Work completion associated with the received message.
// * @grh: References the received global route header. This parameter is
// * ignored unless the work completion indicates that the GRH is valid.
// * @ah_attr: Returned attributes that can be used when creating an address
// * handle for replying to the message.
// */
//int ibv_init_ah_from_wc(struct ibv_context *context, uint8_t port_num,
// struct ibv_wc *wc, struct ibv_grh *grh,
// struct ibv_ah_attr *ah_attr);
//
///**
// * ibv_create_ah_from_wc - Creates an address handle associated with the
// * sender of the specified work completion.
// * @pd: The protection domain associated with the address handle.
// * @wc: Work completion information associated with a received message.
// * @grh: References the received global route header. This parameter is
// * ignored unless the work completion indicates that the GRH is valid.
// * @port_num: The outbound port number to associate with the address.
// *
// * The address handle is used to reference a local or global destination
// * in all UD QP post sends.
// */
//struct ibv_ah *ibv_create_ah_from_wc(struct ibv_pd *pd, struct ibv_wc *wc,
// struct ibv_grh *grh, uint8_t port_num);
//
///**
// * ibv_destroy_ah - Destroy an address handle.
// */
//int ibv_destroy_ah(struct ibv_ah *ah);
//
///**
// * ibv_attach_mcast - Attaches the specified QP to a multicast group.
// * @qp: QP to attach to the multicast group. The QP must be a UD QP.
// * @gid: Multicast group GID.
// * @lid: Multicast group LID in host byte order.
// *
// * In order to route multicast packets correctly, subnet
// * administration must have created the multicast group and configured
// * the fabric appropriately. The port associated with the specified
// * QP must also be a member of the multicast group.
// */
//int ibv_attach_mcast(struct ibv_qp *qp, const union ibv_gid *gid, uint16_t lid);
//
///**
// * ibv_detach_mcast - Detaches the specified QP from a multicast group.
// * @qp: QP to detach from the multicast group.
// * @gid: Multicast group GID.
// * @lid: Multicast group LID in host byte order.
// */
//int ibv_detach_mcast(struct ibv_qp *qp, const union ibv_gid *gid, uint16_t lid);
//
///**
// * ibv_fork_init - Prepare data structures so that fork() may be used
// * safely. If this function is not called or returns a non-zero
// * status, then libibverbs data structures are not fork()-safe and the
// * effect of an application calling fork() is undefined.
// */
//int ibv_fork_init(void);
//
///**
// * ibv_node_type_str - Return string describing node_type enum value
// */
//const char *ibv_node_type_str(enum ibv_node_type node_type);
//
///**
// * ibv_port_state_str - Return string describing port_state enum value
// */
//const char *ibv_port_state_str(enum ibv_port_state port_state);
//
///**
// * ibv_event_type_str - Return string describing event_type enum value
// */
//const char *ibv_event_type_str(enum ibv_event_type event);
//
#define ETHERNET_LL_SIZE 6
//int ibv_resolve_eth_l2_from_gid(struct ibv_context *context,
// struct ibv_ah_attr *attr,
// uint8_t eth_mac[ETHERNET_LL_SIZE],
// uint16_t *vid);
//
//static inline int ibv_is_qpt_supported(uint32_t caps, enum ibv_qp_type qpt)
//{
// return !!(caps & (1 << qpt));
//}
END_C_DECLS
# undef __attribute_const
#endif /* INFINIBAND_VERBS_H */

View file

@ -718,7 +718,10 @@ int check_add_port(char **service,int port,
{
int number;
if (asprintf(service,"%d", port) < 0) {
int str_size_max = 6;
*service = calloc(str_size_max, sizeof(char));
/* if (asprintf(service,"%d", port) < 0) { */
if (snprintf(*service, str_size_max, "%d", port) < 0) {
return FAILURE;
}

View file

@ -34,12 +34,6 @@
#include <hermit/ibv.h> // Geht per cmake copy workaround
/* #define N 255 */
/* static void test_handler(int s) */
/* { */
/* printf("Receive signal with number %d\n", s); */
/* } */
int main(int argc, char** argv)
{