/* libhermit/tools/ibv_code_generator/verbs_ptr_structs.h */
struct ibv_async_event {
union {
struct ibv_cq *cq;
struct ibv_qp *qp;
struct ibv_srq *srq;
struct ibv_wq *wq;
int port_num;
} element;
enum ibv_event_type event_type;
};
struct ibv_mw_bind_info {
struct ibv_mr *mr;
uint64_t addr;
uint64_t length;
int mw_access_flags; /* use ibv_access_flags */
};
struct ibv_pd {
struct ibv_context *context;
uint32_t handle;
};
struct ibv_xrcd {
struct ibv_context *context;
};
struct ibv_mr {
struct ibv_context *context;
struct ibv_pd *pd;
void *addr;
size_t length;
uint32_t handle;
uint32_t lkey;
uint32_t rkey;
};
struct ibv_mw {
struct ibv_context *context;
struct ibv_pd *pd;
uint32_t rkey;
uint32_t handle;
enum ibv_mw_type type;
};
struct ibv_srq_init_attr {
void *srq_context;
struct ibv_srq_attr attr;
};
struct ibv_srq_init_attr_ex {
void *srq_context;
struct ibv_srq_attr attr;
uint32_t comp_mask;
enum ibv_srq_type srq_type;
struct ibv_pd *pd;
struct ibv_xrcd *xrcd;
struct ibv_cq *cq;
};
struct ibv_wq_init_attr {
void *wq_context;
enum ibv_wq_type wq_type;
uint32_t max_wr;
uint32_t max_sge;
struct ibv_pd *pd;
struct ibv_cq *cq;
uint32_t comp_mask; /* Use ibv_wq_init_attr_mask */
uint32_t create_flags; /* use ibv_wq_flags */
};
/*
 * Receive Work Queue Indirection Table.
 * It is used to distribute incoming packets between different
 * Receive Work Queues. Associating Receive WQs with different CPU cores
 * allows the incoming traffic to be spread across those cores.
 * The Indirection Table can contain only WQs of type IBV_WQT_RQ.
 */
struct ibv_rwq_ind_table {
struct ibv_context *context;
int ind_tbl_handle;
int ind_tbl_num;
uint32_t comp_mask;
};
/*
* Receive Work Queue Indirection Table attributes
*/
struct ibv_rwq_ind_table_init_attr {
uint32_t log_ind_tbl_size;
/* Each entry is a pointer to a Receive Work Queue */
struct ibv_wq **ind_tbl;
uint32_t comp_mask;
};
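/*
 * Illustrative sketch (not part of this struct listing): building an
 * indirection table from four IBV_WQT_RQ Work Queues with the standard
 * libibverbs calls. Assumes <infiniband/verbs.h>; `ctx`, `pd` and `cq` are
 * hypothetical, previously created objects, and error handling is omitted.
 *
 *   struct ibv_wq *wqs[4];
 *   struct ibv_wq_init_attr wq_attr = {
 *       .wq_type = IBV_WQT_RQ,
 *       .max_wr  = 64,
 *       .max_sge = 1,
 *       .pd      = pd,
 *       .cq      = cq,
 *   };
 *   for (int i = 0; i < 4; ++i)
 *       wqs[i] = ibv_create_wq(ctx, &wq_attr);
 *
 *   struct ibv_rwq_ind_table_init_attr ind_attr = {
 *       .log_ind_tbl_size = 2,   // table holds 1 << 2 = 4 WQ pointers
 *       .ind_tbl          = wqs,
 *   };
 *   struct ibv_rwq_ind_table *tbl = ibv_create_rwq_ind_table(ctx, &ind_attr);
 */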
struct ibv_qp_init_attr {
void *qp_context;
struct ibv_cq *send_cq;
struct ibv_cq *recv_cq;
struct ibv_srq *srq;
struct ibv_qp_cap cap;
enum ibv_qp_type qp_type;
int sq_sig_all;
};
struct ibv_rx_hash_conf {
/* enum ibv_rx_hash_function_flags */
uint8_t rx_hash_function;
uint8_t rx_hash_key_len;
uint8_t *rx_hash_key;
/* enum ibv_rx_hash_fields */
uint64_t rx_hash_fields_mask;
};
struct ibv_qp_init_attr_ex {
void *qp_context;
struct ibv_cq *send_cq;
struct ibv_cq *recv_cq;
struct ibv_srq *srq;
struct ibv_qp_cap cap;
enum ibv_qp_type qp_type;
int sq_sig_all;
uint32_t comp_mask;
struct ibv_pd *pd;
struct ibv_xrcd *xrcd;
uint32_t create_flags;
uint16_t max_tso_header;
struct ibv_rwq_ind_table *rwq_ind_tbl;
struct ibv_rx_hash_conf rx_hash_conf;
uint32_t source_qpn;
};
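/*
 * Illustrative sketch: creating a raw-packet QP whose receives are spread
 * over an indirection table via RSS. `ctx`, `pd`, `tbl` and the Toeplitz key
 * `rss_key` are hypothetical, error handling is omitted, and individual
 * providers may impose further requirements.
 *
 *   struct ibv_qp_init_attr_ex attr_ex = {
 *       .qp_type     = IBV_QPT_RAW_PACKET,
 *       .comp_mask   = IBV_QP_INIT_ATTR_PD |
 *                      IBV_QP_INIT_ATTR_IND_TABLE |
 *                      IBV_QP_INIT_ATTR_RX_HASH,
 *       .pd          = pd,
 *       .rwq_ind_tbl = tbl,
 *       .rx_hash_conf = {
 *           .rx_hash_function    = IBV_RX_HASH_FUNC_TOEPLITZ,
 *           .rx_hash_key_len     = sizeof(rss_key),
 *           .rx_hash_key         = rss_key,
 *           .rx_hash_fields_mask = IBV_RX_HASH_SRC_IPV4 |
 *                                  IBV_RX_HASH_DST_IPV4,
 *       },
 *   };
 *   struct ibv_qp *qp = ibv_create_qp_ex(ctx, &attr_ex);
 */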
struct ibv_qp_open_attr {
uint32_t comp_mask;
uint32_t qp_num;
struct ibv_xrcd *xrcd;
void *qp_context;
enum ibv_qp_type qp_type;
};
struct ibv_send_wr {
uint64_t wr_id;
struct ibv_send_wr *next;
struct ibv_sge *sg_list;
int num_sge;
enum ibv_wr_opcode opcode;
int send_flags;
/* When opcode is *_WITH_IMM: Immediate data in network byte order.
* When opcode is *_INV: Stores the rkey to invalidate
*/
union {
__be32 imm_data;
uint32_t invalidate_rkey;
};
union {
struct {
uint64_t remote_addr;
uint32_t rkey;
} rdma;
struct {
uint64_t remote_addr;
uint64_t compare_add;
uint64_t swap;
uint32_t rkey;
} atomic;
struct {
struct ibv_ah *ah;
uint32_t remote_qpn;
uint32_t remote_qkey;
} ud;
} wr;
union {
struct {
uint32_t remote_srqn;
} xrc;
} qp_type;
union {
struct {
struct ibv_mw *mw;
uint32_t rkey;
struct ibv_mw_bind_info bind_info;
} bind_mw;
struct {
void *hdr;
uint16_t hdr_sz;
uint16_t mss;
} tso;
};
};
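/*
 * Illustrative sketch: posting a single RDMA write with immediate data.
 * `qp`, `buf`, `lkey`, `raddr` and `rkey` are hypothetical values obtained
 * elsewhere; htonl() comes from <arpa/inet.h>, and imm_data is carried in
 * network byte order, per the comment above.
 *
 *   struct ibv_sge sge = {
 *       .addr   = (uintptr_t)buf,
 *       .length = 64,
 *       .lkey   = lkey,
 *   };
 *   struct ibv_send_wr wr = {
 *       .wr_id      = 1,
 *       .sg_list    = &sge,
 *       .num_sge    = 1,
 *       .opcode     = IBV_WR_RDMA_WRITE_WITH_IMM,
 *       .send_flags = IBV_SEND_SIGNALED,
 *       .imm_data   = htonl(0x1234),
 *   };
 *   wr.wr.rdma.remote_addr = raddr;
 *   wr.wr.rdma.rkey        = rkey;
 *
 *   struct ibv_send_wr *bad_wr;
 *   int rc = ibv_post_send(qp, &wr, &bad_wr);
 */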
struct ibv_recv_wr {
uint64_t wr_id;
struct ibv_recv_wr *next;
struct ibv_sge *sg_list;
int num_sge;
};
struct ibv_srq {
struct ibv_context *context;
void *srq_context;
struct ibv_pd *pd;
uint32_t handle;
pthread_mutex_t mutex;
pthread_cond_t cond;
uint32_t events_completed;
};
/*
 * Work Queue. A QP can be created without internal WQs "packaged" inside it;
 * such a QP can be configured to use an "external" WQ object as its
 * receive/send queue.
 * A WQ is associated (many to one) with a Completion Queue and owns its own
 * WQ properties (PD, WQ size, etc.).
 * A WQ of type IBV_WQT_RQ:
 * - Contains receive WQEs; in this case its PD serves as the scatter PD
 *   as well.
 * - Exposes a post receive function used to post a list of work
 *   requests (WRs) to its receive queue.
 */
struct ibv_wq {
struct ibv_context *context;
void *wq_context;
struct ibv_pd *pd;
struct ibv_cq *cq;
uint32_t wq_num;
uint32_t handle;
enum ibv_wq_state state;
enum ibv_wq_type wq_type;
int (*post_recv)(struct ibv_wq *current,
struct ibv_recv_wr *recv_wr,
struct ibv_recv_wr **bad_recv_wr);
pthread_mutex_t mutex;
pthread_cond_t cond;
uint32_t events_completed;
uint32_t comp_mask;
};
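/*
 * Illustrative sketch: posting a receive WR to an external Work Queue through
 * the ibv_post_wq_recv() wrapper, which dispatches to the post_recv hook
 * above. `wq`, `buf` and `lkey` are hypothetical.
 *
 *   struct ibv_sge sge = {
 *       .addr   = (uintptr_t)buf,
 *       .length = 2048,
 *       .lkey   = lkey,
 *   };
 *   struct ibv_recv_wr rwr = {
 *       .wr_id   = 7,
 *       .sg_list = &sge,
 *       .num_sge = 1,
 *   };
 *   struct ibv_recv_wr *bad_rwr;
 *   int rc = ibv_post_wq_recv(wq, &rwr, &bad_rwr);
 */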
struct ibv_qp {
struct ibv_context *context;
void *qp_context;
struct ibv_pd *pd;
struct ibv_cq *send_cq;
struct ibv_cq *recv_cq;
struct ibv_srq *srq;
uint32_t handle;
uint32_t qp_num;
enum ibv_qp_state state;
enum ibv_qp_type qp_type;
pthread_mutex_t mutex;
pthread_cond_t cond;
uint32_t events_completed;
};
struct ibv_comp_channel {
struct ibv_context *context;
int fd;
int refcnt;
};
struct ibv_cq {
struct ibv_context *context;
struct ibv_comp_channel *channel;
void *cq_context;
uint32_t handle;
int cqe;
pthread_mutex_t mutex;
pthread_cond_t cond;
uint32_t comp_events_completed;
uint32_t async_events_completed;
};
struct ibv_cq_ex {
struct ibv_context *context;
struct ibv_comp_channel *channel;
void *cq_context;
uint32_t handle;
int cqe;
pthread_mutex_t mutex;
pthread_cond_t cond;
uint32_t comp_events_completed;
uint32_t async_events_completed;
uint32_t comp_mask;
enum ibv_wc_status status;
uint64_t wr_id;
int (*start_poll)(struct ibv_cq_ex *current,
struct ibv_poll_cq_attr *attr);
int (*next_poll)(struct ibv_cq_ex *current);
void (*end_poll)(struct ibv_cq_ex *current);
enum ibv_wc_opcode (*read_opcode)(struct ibv_cq_ex *current);
uint32_t (*read_vendor_err)(struct ibv_cq_ex *current);
uint32_t (*read_byte_len)(struct ibv_cq_ex *current);
__be32 (*read_imm_data)(struct ibv_cq_ex *current);
uint32_t (*read_qp_num)(struct ibv_cq_ex *current);
uint32_t (*read_src_qp)(struct ibv_cq_ex *current);
int (*read_wc_flags)(struct ibv_cq_ex *current);
uint32_t (*read_slid)(struct ibv_cq_ex *current);
uint8_t (*read_sl)(struct ibv_cq_ex *current);
uint8_t (*read_dlid_path_bits)(struct ibv_cq_ex *current);
uint64_t (*read_completion_ts)(struct ibv_cq_ex *current);
uint16_t (*read_cvlan)(struct ibv_cq_ex *current);
uint32_t (*read_flow_tag)(struct ibv_cq_ex *current);
};
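/*
 * Illustrative sketch: draining an extended CQ with the start/next/end poll
 * interface exposed by the function pointers above, normally reached through
 * the ibv_start_poll()/ibv_next_poll()/ibv_end_poll() wrappers. `cq_ex` is a
 * hypothetical, previously created extended CQ; ibv_end_poll() is only called
 * when ibv_start_poll() actually returned a completion.
 *
 *   struct ibv_poll_cq_attr poll_attr = { .comp_mask = 0 };
 *   if (ibv_start_poll(cq_ex, &poll_attr) == 0) {
 *       do {
 *           if (cq_ex->status == IBV_WC_SUCCESS) {
 *               uint64_t id  = cq_ex->wr_id;
 *               uint32_t len = ibv_wc_read_byte_len(cq_ex);
 *               // ... consume the completion ...
 *           }
 *       } while (ibv_next_poll(cq_ex) == 0);
 *       ibv_end_poll(cq_ex);
 *   }
 */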
struct ibv_ah {
struct ibv_context *context;
struct ibv_pd *pd;
uint32_t handle;
};
struct ibv_flow {
uint32_t comp_mask;
struct ibv_context *context;
uint32_t handle;
};
/* Obsolete, never used, do not touch */
struct _ibv_device_ops {
struct ibv_context * (*_dummy1)(struct ibv_device *device, int cmd_fd);
void (*_dummy2)(struct ibv_context *context);
};
struct ibv_context_ops {
int (*query_device)(struct ibv_context *context,
struct ibv_device_attr *device_attr);
int (*query_port)(struct ibv_context *context, uint8_t port_num,
struct ibv_port_attr *port_attr);
struct ibv_pd * (*alloc_pd)(struct ibv_context *context);
int (*dealloc_pd)(struct ibv_pd *pd);
struct ibv_mr * (*reg_mr)(struct ibv_pd *pd, void *addr, size_t length,
int access);
int (*rereg_mr)(struct ibv_mr *mr,
int flags,
struct ibv_pd *pd, void *addr,
size_t length,
int access);
int (*dereg_mr)(struct ibv_mr *mr);
struct ibv_mw * (*alloc_mw)(struct ibv_pd *pd, enum ibv_mw_type type);
int (*bind_mw)(struct ibv_qp *qp, struct ibv_mw *mw,
struct ibv_mw_bind *mw_bind);
int (*dealloc_mw)(struct ibv_mw *mw);
struct ibv_cq * (*create_cq)(struct ibv_context *context, int cqe,
struct ibv_comp_channel *channel,
int comp_vector);
int (*poll_cq)(struct ibv_cq *cq, int num_entries, struct ibv_wc *wc);
int (*req_notify_cq)(struct ibv_cq *cq, int solicited_only);
void (*cq_event)(struct ibv_cq *cq);
int (*resize_cq)(struct ibv_cq *cq, int cqe);
int (*destroy_cq)(struct ibv_cq *cq);
struct ibv_srq * (*create_srq)(struct ibv_pd *pd,
struct ibv_srq_init_attr *srq_init_attr);
int (*modify_srq)(struct ibv_srq *srq,
struct ibv_srq_attr *srq_attr,
int srq_attr_mask);
int (*query_srq)(struct ibv_srq *srq,
struct ibv_srq_attr *srq_attr);
int (*destroy_srq)(struct ibv_srq *srq);
int (*post_srq_recv)(struct ibv_srq *srq,
struct ibv_recv_wr *recv_wr,
struct ibv_recv_wr **bad_recv_wr);
struct ibv_qp * (*create_qp)(struct ibv_pd *pd, struct ibv_qp_init_attr *attr);
int (*query_qp)(struct ibv_qp *qp, struct ibv_qp_attr *attr,
int attr_mask,
struct ibv_qp_init_attr *init_attr);
int (*modify_qp)(struct ibv_qp *qp, struct ibv_qp_attr *attr,
int attr_mask);
int (*destroy_qp)(struct ibv_qp *qp);
int (*post_send)(struct ibv_qp *qp, struct ibv_send_wr *wr,
struct ibv_send_wr **bad_wr);
int (*post_recv)(struct ibv_qp *qp, struct ibv_recv_wr *wr,
struct ibv_recv_wr **bad_wr);
struct ibv_ah * (*create_ah)(struct ibv_pd *pd, struct ibv_ah_attr *attr);
int (*destroy_ah)(struct ibv_ah *ah);
int (*attach_mcast)(struct ibv_qp *qp, const union ibv_gid *gid,
uint16_t lid);
int (*detach_mcast)(struct ibv_qp *qp, const union ibv_gid *gid,
uint16_t lid);
void (*async_event)(struct ibv_async_event *event);
};
struct ibv_context {
struct ibv_device *device;
struct ibv_context_ops ops;
int cmd_fd;
int async_fd;
int num_comp_vectors;
pthread_mutex_t mutex;
void *abi_compat;
};
struct ibv_cq_init_attr_ex {
/* Minimum number of entries required for CQ */
uint32_t cqe;
/* Consumer-supplied context returned for completion events */
void *cq_context;
/* Completion channel where completion events will be queued.
* May be NULL if completion events will not be used.
*/
struct ibv_comp_channel *channel;
/* Completion vector used to signal completion events.
* Must be < context->num_comp_vectors.
*/
uint32_t comp_vector;
/* Or'ed bits of enum ibv_create_cq_wc_flags. */
uint64_t wc_flags;
/* compatibility mask (extended verb). Or'd flags of
* enum ibv_cq_init_attr_mask
*/
uint32_t comp_mask;
/* create cq attr flags - one or more flags from
* enum ibv_create_cq_attr_flags
*/
uint32_t flags;
};
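/*
 * Illustrative sketch: requesting an extended CQ that also reports a
 * completion timestamp. `ctx` is hypothetical; passing a NULL channel is
 * allowed per the field comment above.
 *
 *   struct ibv_cq_init_attr_ex cq_attr = {
 *       .cqe         = 256,
 *       .cq_context  = NULL,
 *       .channel     = NULL,
 *       .comp_vector = 0,
 *       .wc_flags    = IBV_WC_STANDARD_FLAGS |
 *                      IBV_WC_EX_WITH_COMPLETION_TIMESTAMP,
 *   };
 *   struct ibv_cq_ex *cq_ex = ibv_create_cq_ex(ctx, &cq_attr);
 */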
struct verbs_context {
/* "grows up" - new fields go here */
int (*destroy_rwq_ind_table)(struct ibv_rwq_ind_table *rwq_ind_table);
struct ibv_rwq_ind_table *(*create_rwq_ind_table)(struct ibv_context *context,
struct ibv_rwq_ind_table_init_attr *init_attr);
int (*destroy_wq)(struct ibv_wq *wq);
int (*modify_wq)(struct ibv_wq *wq, struct ibv_wq_attr *wq_attr);
struct ibv_wq * (*create_wq)(struct ibv_context *context,
struct ibv_wq_init_attr *wq_init_attr);
int (*query_rt_values)(struct ibv_context *context,
struct ibv_values_ex *values);
struct ibv_cq_ex *(*create_cq_ex)(struct ibv_context *context,
struct ibv_cq_init_attr_ex *init_attr);
struct verbs_ex_private *priv;
int (*query_device_ex)(struct ibv_context *context,
const struct ibv_query_device_ex_input *input,
struct ibv_device_attr_ex *attr,
size_t attr_size);
int (*ibv_destroy_flow) (struct ibv_flow *flow);
void (*ABI_placeholder2) (void); /* DO NOT COPY THIS GARBAGE */
struct ibv_flow * (*ibv_create_flow) (struct ibv_qp *qp,
struct ibv_flow_attr *flow_attr);
void (*ABI_placeholder1) (void); /* DO NOT COPY THIS GARBAGE */
struct ibv_qp *(*open_qp)(struct ibv_context *context,
struct ibv_qp_open_attr *attr);
struct ibv_qp *(*create_qp_ex)(struct ibv_context *context,
struct ibv_qp_init_attr_ex *qp_init_attr_ex);
int (*get_srq_num)(struct ibv_srq *srq, uint32_t *srq_num);
struct ibv_srq * (*create_srq_ex)(struct ibv_context *context,
struct ibv_srq_init_attr_ex *srq_init_attr_ex);
struct ibv_xrcd * (*open_xrcd)(struct ibv_context *context,
struct ibv_xrcd_init_attr *xrcd_init_attr);
int (*close_xrcd)(struct ibv_xrcd *xrcd);
uint64_t has_comp_mask;
size_t sz; /* Must be immediately before struct ibv_context */
struct ibv_context context; /* Must be last field in the struct */
};
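/*
 * Layout note (illustrative): because `sz` sits immediately before the
 * embedded `context` and `context` is the last field, the enclosing
 * verbs_context can be recovered from a plain struct ibv_context pointer
 * with container_of-style arithmetic, roughly:
 *
 *   #include <stddef.h>
 *   struct verbs_context *vctx = (struct verbs_context *)
 *       ((char *)ctx - offsetof(struct verbs_context, context));
 *
 * libibverbs wraps this pattern (plus a check that `ctx` really is backed by
 * an extended context) in its verbs_get_ctx() helper; `ctx` here is assumed
 * to point at such an extended context.
 */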