rdma/siw: application interface

Broken up commit to add the Soft iWarp RDMA driver.

Signed-off-by: Bernard Metzler <bmt@zurich.ibm.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
commit 303ae1cdfd
parent 6c52fdc244
Author:    Bernard Metzler, 2019-06-20 18:21:27 +02:00
Committer: Jason Gunthorpe
4 changed files with 2037 additions and 0 deletions

File diff suppressed because it is too large.

drivers/infiniband/sw/siw/siw_verbs.h Normal file

@@ -0,0 +1,91 @@
/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
/* Authors: Bernard Metzler <bmt@zurich.ibm.com> */
/* Copyright (c) 2008-2019, IBM Corporation */
#ifndef _SIW_VERBS_H
#define _SIW_VERBS_H
#include <linux/errno.h>
#include <rdma/iw_cm.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include "siw.h"
#include "siw_cm.h"
/*
 * siw_copy_sgl()
 *
 * Copy SGL from RDMA core representation to local
 * representation.
 */
static inline void siw_copy_sgl(struct ib_sge *sge, struct siw_sge *siw_sge,
				int num_sge)
{
	while (num_sge--) {
		siw_sge->laddr = sge->addr;
		siw_sge->length = sge->length;
		siw_sge->lkey = sge->lkey;
		siw_sge++;
		sge++;
	}
}
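/*
 * Illustrative sketch (hypothetical helper): a receive path could use
 * siw_copy_sgl() to capture a posted ib_recv_wr into a local RQE before
 * handing it to the consumer. Assumes struct siw_rqe and SIW_WQE_VALID
 * from siw-abi.h are in scope via "siw.h".
 */
static inline void siw_example_fill_rqe(struct siw_rqe *rqe,
					const struct ib_recv_wr *wr)
{
	rqe->id = wr->wr_id;
	rqe->num_sge = wr->num_sge;
	siw_copy_sgl(wr->sg_list, rqe->sge, wr->num_sge);
	/* Mark valid last, once the element is fully filled in. */
	rqe->flags = SIW_WQE_VALID;
}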
int siw_alloc_ucontext(struct ib_ucontext *base_ctx, struct ib_udata *udata);
void siw_dealloc_ucontext(struct ib_ucontext *base_ctx);
int siw_query_port(struct ib_device *base_dev, u8 port,
		   struct ib_port_attr *attr);
int siw_get_port_immutable(struct ib_device *base_dev, u8 port,
			   struct ib_port_immutable *port_immutable);
int siw_query_device(struct ib_device *base_dev, struct ib_device_attr *attr,
		     struct ib_udata *udata);
int siw_create_cq(struct ib_cq *base_cq, const struct ib_cq_init_attr *attr,
		  struct ib_udata *udata);
int siw_query_port(struct ib_device *base_dev, u8 port,
		   struct ib_port_attr *attr);
int siw_query_pkey(struct ib_device *base_dev, u8 port, u16 idx, u16 *pkey);
int siw_query_gid(struct ib_device *base_dev, u8 port, int idx,
		  union ib_gid *gid);
int siw_alloc_pd(struct ib_pd *base_pd, struct ib_udata *udata);
void siw_dealloc_pd(struct ib_pd *base_pd, struct ib_udata *udata);
struct ib_qp *siw_create_qp(struct ib_pd *base_pd,
			    struct ib_qp_init_attr *attr,
			    struct ib_udata *udata);
int siw_query_qp(struct ib_qp *base_qp, struct ib_qp_attr *qp_attr,
		 int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
int siw_verbs_modify_qp(struct ib_qp *base_qp, struct ib_qp_attr *attr,
			int attr_mask, struct ib_udata *udata);
int siw_destroy_qp(struct ib_qp *base_qp, struct ib_udata *udata);
int siw_post_send(struct ib_qp *base_qp, const struct ib_send_wr *wr,
		  const struct ib_send_wr **bad_wr);
int siw_post_receive(struct ib_qp *base_qp, const struct ib_recv_wr *wr,
		     const struct ib_recv_wr **bad_wr);
void siw_destroy_cq(struct ib_cq *base_cq, struct ib_udata *udata);
int siw_poll_cq(struct ib_cq *base_cq, int num_entries, struct ib_wc *wc);
int siw_req_notify_cq(struct ib_cq *base_cq, enum ib_cq_notify_flags flags);
struct ib_mr *siw_reg_user_mr(struct ib_pd *base_pd, u64 start, u64 len,
			      u64 rnic_va, int rights, struct ib_udata *udata);
struct ib_mr *siw_alloc_mr(struct ib_pd *base_pd, enum ib_mr_type mr_type,
			   u32 max_sge, struct ib_udata *udata);
struct ib_mr *siw_get_dma_mr(struct ib_pd *base_pd, int rights);
int siw_map_mr_sg(struct ib_mr *base_mr, struct scatterlist *sl, int num_sle,
		  unsigned int *sg_off);
int siw_dereg_mr(struct ib_mr *base_mr, struct ib_udata *udata);
int siw_create_srq(struct ib_srq *base_srq, struct ib_srq_init_attr *attr,
		   struct ib_udata *udata);
int siw_modify_srq(struct ib_srq *base_srq, struct ib_srq_attr *attr,
		   enum ib_srq_attr_mask mask, struct ib_udata *udata);
int siw_query_srq(struct ib_srq *base_srq, struct ib_srq_attr *attr);
void siw_destroy_srq(struct ib_srq *base_srq, struct ib_udata *udata);
int siw_post_srq_recv(struct ib_srq *base_srq, const struct ib_recv_wr *wr,
		      const struct ib_recv_wr **bad_wr);
int siw_mmap(struct ib_ucontext *ctx, struct vm_area_struct *vma);
void siw_qp_event(struct siw_qp *qp, enum ib_event_type type);
void siw_cq_event(struct siw_cq *cq, enum ib_event_type type);
void siw_srq_event(struct siw_srq *srq, enum ib_event_type type);
void siw_port_event(struct siw_device *dev, u8 port, enum ib_event_type type);
#endif

include/uapi/rdma/rdma_user_ioctl_cmds.h

@@ -103,6 +103,7 @@ enum rdma_driver_id {
	RDMA_DRIVER_HFI1,
	RDMA_DRIVER_QIB,
	RDMA_DRIVER_EFA,
	RDMA_DRIVER_SIW,
};
#endif
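The new RDMA_DRIVER_SIW value identifies the siw driver to the RDMA core and lets a matching userspace provider bind to the devices it registers. A minimal sketch of how a driver advertises this id through its ib_device_ops table (abbreviated, callbacks omitted; shown only as an assumption about how siw wires it up):

static const struct ib_device_ops siw_device_ops = {
	.owner = THIS_MODULE,
	.uverbs_abi_ver = SIW_ABI_VERSION,
	.driver_id = RDMA_DRIVER_SIW,
	/* ... verbs and connection-management callbacks ... */
};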

include/uapi/rdma/siw-abi.h Normal file

@@ -0,0 +1,185 @@
/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
/* Authors: Bernard Metzler <bmt@zurich.ibm.com> */
/* Copyright (c) 2008-2019, IBM Corporation */
#ifndef _SIW_USER_H
#define _SIW_USER_H
#include <linux/types.h>
#define SIW_NODE_DESC_COMMON "Software iWARP stack"
#define SIW_ABI_VERSION 1
#define SIW_MAX_SGE 6
#define SIW_UOBJ_MAX_KEY 0x08FFFF
#define SIW_INVAL_UOBJ_KEY (SIW_UOBJ_MAX_KEY + 1)
struct siw_uresp_create_cq {
	__u32 cq_id;
	__u32 num_cqe;
	__aligned_u64 cq_key;
};

struct siw_uresp_create_qp {
	__u32 qp_id;
	__u32 num_sqe;
	__u32 num_rqe;
	__u32 pad;
	__aligned_u64 sq_key;
	__aligned_u64 rq_key;
};

struct siw_ureq_reg_mr {
	__u8 stag_key;
	__u8 reserved[3];
	__u32 pad;
};

struct siw_uresp_reg_mr {
	__u32 stag;
	__u32 pad;
};

struct siw_uresp_create_srq {
	__u32 num_rqe;
	__u32 pad;
	__aligned_u64 srq_key;
};

struct siw_uresp_alloc_ctx {
	__u32 dev_id;
	__u32 pad;
};
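/*
 * Usage sketch for the *_key values above: the kernel hands these keys
 * back in the create responses so the user library can mmap() the shared
 * queue memory through the ucontext file descriptor, e.g. (userspace
 * side, illustrative only; how the key is encoded into the mmap offset
 * is defined by the provider/kernel pair, not by this header):
 *
 *	if (uresp.cq_key != SIW_INVAL_UOBJ_KEY)
 *		cq_queue = mmap(NULL, queue_size, PROT_READ | PROT_WRITE,
 *				MAP_SHARED, ctx_fd, uresp.cq_key);
 */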
enum siw_opcode {
	SIW_OP_WRITE,
	SIW_OP_READ,
	SIW_OP_READ_LOCAL_INV,
	SIW_OP_SEND,
	SIW_OP_SEND_WITH_IMM,
	SIW_OP_SEND_REMOTE_INV,

	/* Unsupported */
	SIW_OP_FETCH_AND_ADD,
	SIW_OP_COMP_AND_SWAP,

	SIW_OP_RECEIVE,
	/* provider internal SQE */
	SIW_OP_READ_RESPONSE,
	/*
	 * below opcodes valid for
	 * in-kernel clients only
	 */
	SIW_OP_INVAL_STAG,
	SIW_OP_REG_MR,

	SIW_NUM_OPCODES
};
/* Keep it same as ibv_sge to allow for memcpy */
struct siw_sge {
	__aligned_u64 laddr;
	__u32 length;
	__u32 lkey;
};
/*
* Inline data are kept within the work request itself occupying
* the space of sge[1] .. sge[n]. Therefore, inline data cannot be
* supported if SIW_MAX_SGE is below 2 elements.
*/
#define SIW_MAX_INLINE (sizeof(struct siw_sge) * (SIW_MAX_SGE - 1))
#if SIW_MAX_SGE < 2
#error "SIW_MAX_SGE must be at least 2"
#endif
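/*
 * Worked example: with SIW_MAX_SGE == 6 and sizeof(struct siw_sge) ==
 * 16 bytes (8-byte laddr + 4-byte length + 4-byte lkey), SIW_MAX_INLINE
 * evaluates to 16 * (6 - 1) == 80 bytes of inline payload per SQE.
 */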
enum siw_wqe_flags {
	SIW_WQE_VALID = 1,
	SIW_WQE_INLINE = (1 << 1),
	SIW_WQE_SIGNALLED = (1 << 2),
	SIW_WQE_SOLICITED = (1 << 3),
	SIW_WQE_READ_FENCE = (1 << 4),
	SIW_WQE_REM_INVAL = (1 << 5),
	SIW_WQE_COMPLETED = (1 << 6)
};
/* Send Queue Element */
struct siw_sqe {
	__aligned_u64 id;
	__u16 flags;
	__u8 num_sge;
	/* Contains enum siw_opcode values */
	__u8 opcode;
	__u32 rkey;
	union {
		__aligned_u64 raddr;
		__aligned_u64 base_mr;
	};
	union {
		struct siw_sge sge[SIW_MAX_SGE];
		__aligned_u64 access;
	};
};
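/*
 * Illustrative sketch (hypothetical helper): how a provider might fill
 * one SQE for an RDMA WRITE with a single local segment. Queue indexing,
 * doorbell handling and memory ordering of the VALID flag are the
 * provider's responsibility and are omitted here.
 */
static inline void siw_example_fill_write_sqe(struct siw_sqe *sqe,
					      __u64 wr_id, __u64 raddr,
					      __u32 rkey, __u64 laddr,
					      __u32 length, __u32 lkey)
{
	sqe->id = wr_id;
	sqe->opcode = SIW_OP_WRITE;
	sqe->num_sge = 1;
	sqe->raddr = raddr;
	sqe->rkey = rkey;
	sqe->sge[0].laddr = laddr;
	sqe->sge[0].length = length;
	sqe->sge[0].lkey = lkey;
	/* Publish last, so the consumer never sees a half-written SQE. */
	sqe->flags = SIW_WQE_VALID | SIW_WQE_SIGNALLED;
}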
/* Receive Queue Element */
struct siw_rqe {
	__aligned_u64 id;
	__u16 flags;
	__u8 num_sge;
	/*
	 * only used by kernel driver,
	 * ignored if set by user
	 */
	__u8 opcode;
	__u32 unused;
	struct siw_sge sge[SIW_MAX_SGE];
};
enum siw_notify_flags {
	SIW_NOTIFY_NOT = (0),
	SIW_NOTIFY_SOLICITED = (1 << 0),
	SIW_NOTIFY_NEXT_COMPLETION = (1 << 1),
	SIW_NOTIFY_MISSED_EVENTS = (1 << 2),
	SIW_NOTIFY_ALL = SIW_NOTIFY_SOLICITED | SIW_NOTIFY_NEXT_COMPLETION |
			 SIW_NOTIFY_MISSED_EVENTS
};
enum siw_wc_status {
	SIW_WC_SUCCESS,
	SIW_WC_LOC_LEN_ERR,
	SIW_WC_LOC_PROT_ERR,
	SIW_WC_LOC_QP_OP_ERR,
	SIW_WC_WR_FLUSH_ERR,
	SIW_WC_BAD_RESP_ERR,
	SIW_WC_LOC_ACCESS_ERR,
	SIW_WC_REM_ACCESS_ERR,
	SIW_WC_REM_INV_REQ_ERR,
	SIW_WC_GENERAL_ERR,
	SIW_NUM_WC_STATUS
};
struct siw_cqe {
	__aligned_u64 id;
	__u8 flags;
	__u8 opcode;
	__u16 status;
	__u32 bytes;
	union {
		__aligned_u64 imm_data;
		__u32 inval_stag;
	};
	/* QP number or QP pointer */
	union {
		struct ib_qp *base_qp;
		__aligned_u64 qp_id;
	};
};
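/*
 * Sketch (hypothetical helper): a provider polling the shared CQE array
 * would treat SIW_WQE_VALID in 'flags' as the ownership bit, copy the
 * completion out, and clear the flag to return the slot to the kernel.
 */
static inline int siw_example_cqe_ready(const struct siw_cqe *cqe)
{
	return (cqe->flags & SIW_WQE_VALID) != 0;
}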
/*
* Shared structure between user and kernel
* to control CQ arming.
*/
struct siw_cq_ctrl {
	__aligned_u64 notify;
};
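/*
 * Sketch (hypothetical helper): arming the CQ writes the requested
 * notification mode into the shared word; the kernel side checks it
 * when a new completion is queued and raises the completion event.
 */
static inline void siw_example_arm_cq(struct siw_cq_ctrl *ctrl,
				      int solicited_only)
{
	ctrl->notify = solicited_only ? SIW_NOTIFY_SOLICITED
				      : SIW_NOTIFY_NEXT_COMPLETION;
}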
#endif