scsi: qla2xxx: Add FC-NVMe F/W initialization and transport registration

This code provides the interfaces to register local and remote ports of
FC4 type 0x28 (FC-NVMe) with the FC-NVMe transport and to carry the
requests (FC-NVMe link services and FC-NVMe command IUs) out to the
fabric. It also adds support for allocating hardware queues and
aborting outstanding FC-NVMe requests.

Signed-off-by: Darren Trapp <darren.trapp@cavium.com>
Signed-off-by: Duane Grigsby <duane.grigsby@cavium.com>
Signed-off-by: Anil Gurumurthy <anil.gurumurthy@cavium.com>
Signed-off-by: Giridhar Malavali <giridhar.malavali@cavium.com>
Signed-off-by: Himanshu Madhani <himanshu.madhani@cavium.com>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
commit e84067d743
parent 7401bc18d1
Author:    Duane Grigsby <duane.grigsby@cavium.com>
Date:      2017-06-21 13:48:43 -07:00
Committer: Martin K. Petersen
11 changed files with 1024 additions and 9 deletions
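At a high level the patch hooks the driver into the nvme-fc transport at two points: one local port per HBA, registered after fabric configuration, and one remote port per discovered N_Port whose PRLI advertised the NVMe target or discovery service. The sketch below condenses that flow using identifiers from the diff; example_nvme_register() is hypothetical and error handling, refcounting and locking are elided. See qla_nvme_register_hba()/qla_nvme_register_remote() in qla_nvme.c for the real implementations.

/* Illustrative sketch only, not code from this patch. */
static void example_nvme_register(scsi_qla_host_t *vha, fc_port_t *fcport)
{
	struct nvme_fc_port_info pinfo = {
		.node_name = wwn_to_u64(vha->node_name),
		.port_name = wwn_to_u64(vha->port_name),
		.port_role = FC_PORT_ROLE_NVME_INITIATOR,
		.port_id   = vha->d_id.b24,
	};

	/* One local port per HBA, bound to the driver's callback template
	 * (create_queue, ls_req, fcp_io, aborts, poll_queue, ...). */
	nvme_fc_register_localport(&pinfo, &qla_nvme_fc_transport,
	    &vha->hw->pdev->dev, &vha->nvme_local_port);

	/* One remote port per fcport that negotiated FC4 type 0x28. */
	qla_nvme_register_remote(vha, fcport);
}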

diff --git a/drivers/scsi/qla2xxx/Makefile b/drivers/scsi/qla2xxx/Makefile

@@ -1,6 +1,6 @@
qla2xxx-y := qla_os.o qla_init.o qla_mbx.o qla_iocb.o qla_isr.o qla_gs.o \
qla_dbg.o qla_sup.o qla_attr.o qla_mid.o qla_dfs.o qla_bsg.o \
-qla_nx.o qla_mr.o qla_nx2.o qla_target.o qla_tmpl.o
+qla_nx.o qla_mr.o qla_nx2.o qla_target.o qla_tmpl.o qla_nvme.o
obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx.o
obj-$(CONFIG_TCM_QLA2XXX) += tcm_qla2xxx.o

diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c

@@ -15,7 +15,7 @@
* | | | 0x015b-0x0160 |
* | | | 0x016e |
* | Mailbox commands | 0x1199 | 0x1193 |
- * | Device Discovery | 0x2131 | 0x210e-0x2116 |
+ * | Device Discovery | 0x2134 | 0x210e-0x2116 |
* | | | 0x211a |
* | | | 0x211c-0x2128 |
* | | | 0x212a-0x2130 |

diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h

@@ -37,6 +37,7 @@
#include "qla_bsg.h"
#include "qla_nx.h"
#include "qla_nx2.h"
#include "qla_nvme.h"
#define QLA2XXX_DRIVER_NAME "qla2xxx"
#define QLA2XXX_APIDEV "ql2xapidev"
#define QLA2XXX_MANUFACTURER "QLogic Corporation"
@@ -423,6 +424,7 @@ struct srb_iocb {
int rsp_len;
dma_addr_t cmd_dma;
dma_addr_t rsp_dma;
enum nvmefc_fcp_datadir dir;
uint32_t dl;
uint32_t timeout_sec;
} nvme;
@@ -452,6 +454,7 @@ struct srb_iocb {
#define SRB_NACK_PRLI 17
#define SRB_NACK_LOGO 18
#define SRB_NVME_CMD 19
#define SRB_NVME_LS 20
#define SRB_PRLI_CMD 21
enum {
@@ -467,6 +470,1 @@ typedef struct srb {
uint8_t cmd_type;
uint8_t pad[3];
atomic_t ref_count;
wait_queue_head_t nvme_ls_waitQ;
struct fc_port *fcport;
struct scsi_qla_host *vha;
uint32_t handle;
@@ -2298,6 +2302,1 @@ typedef struct fc_port {
struct work_struct nvme_del_work;
atomic_t nvme_ref_count;
wait_queue_head_t nvme_waitQ;
uint32_t nvme_prli_service_param;
#define NVME_PRLI_SP_CONF BIT_7
#define NVME_PRLI_SP_INITIATOR BIT_5
@@ -4124,6 +4129,1 @@ typedef struct scsi_qla_host {
struct nvme_fc_local_port *nvme_local_port;
atomic_t nvme_ref_count;
wait_queue_head_t nvme_waitQ;
struct list_head nvme_rport_list;
atomic_t nvme_active_aen_cnt;
uint16_t nvme_last_rptd_aen;

diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h

@@ -9,6 +9,16 @@
#include <linux/interrupt.h>
/*
* Global functions prototype in qla_nvme.c source file.
*/
extern void qla_nvme_register_hba(scsi_qla_host_t *);
extern int qla_nvme_register_remote(scsi_qla_host_t *, fc_port_t *);
extern void qla_nvme_delete(scsi_qla_host_t *);
extern void qla_nvme_abort(struct qla_hw_data *, srb_t *sp);
extern void qla24xx_nvme_ls4_iocb(scsi_qla_host_t *, struct pt_ls4_request *,
struct req_que *);
/*
* Global Function Prototypes in qla_init.c source file.
*/
@@ -141,6 +151,7 @@ extern int ql2xiniexchg;
extern int ql2xfwholdabts;
extern int ql2xmvasynctoatio;
extern int ql2xuctrlirq;
extern int ql2xnvmeenable;
extern int qla2x00_loop_reset(scsi_qla_host_t *);
extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);

diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c

@@ -4520,6 +4520,11 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
fcport->deleted = 0;
fcport->logout_on_delete = 1;
if (fcport->fc4f_nvme) {
qla_nvme_register_remote(vha, fcport);
return;
}
qla2x00_set_fcport_state(fcport, FCS_ONLINE);
qla2x00_iidma_fcport(vha, fcport);
qla24xx_update_fcport_fcp_prio(vha, fcport);
@@ -4669,6 +4674,9 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
break;
} while (0);
if (!vha->nvme_local_port && vha->flags.nvme_enabled)
qla_nvme_register_hba(vha);
if (rval)
ql_dbg(ql_dbg_disc, vha, 0x2068,
"Configure fabric error exit rval=%d.\n", rval);

diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c

@@ -3155,6 +3155,39 @@ static void qla2x00_send_notify_ack_iocb(srb_t *sp,
nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
}
/*
* Build NVME LS request
*/
static int
qla_nvme_ls(srb_t *sp, struct pt_ls4_request *cmd_pkt)
{
struct srb_iocb *nvme;
int rval = QLA_SUCCESS;
nvme = &sp->u.iocb_cmd;
cmd_pkt->entry_type = PT_LS4_REQUEST;
cmd_pkt->entry_count = 1;
cmd_pkt->control_flags = CF_LS4_ORIGINATOR << CF_LS4_SHIFT;
cmd_pkt->timeout = cpu_to_le16(nvme->u.nvme.timeout_sec);
cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
cmd_pkt->tx_dseg_count = 1;
cmd_pkt->tx_byte_count = nvme->u.nvme.cmd_len;
cmd_pkt->dseg0_len = nvme->u.nvme.cmd_len;
cmd_pkt->dseg0_address[0] = cpu_to_le32(LSD(nvme->u.nvme.cmd_dma));
cmd_pkt->dseg0_address[1] = cpu_to_le32(MSD(nvme->u.nvme.cmd_dma));
cmd_pkt->rx_dseg_count = 1;
cmd_pkt->rx_byte_count = nvme->u.nvme.rsp_len;
cmd_pkt->dseg1_len = nvme->u.nvme.rsp_len;
cmd_pkt->dseg1_address[0] = cpu_to_le32(LSD(nvme->u.nvme.rsp_dma));
cmd_pkt->dseg1_address[1] = cpu_to_le32(MSD(nvme->u.nvme.rsp_dma));
return rval;
}
int
qla2x00_start_sp(srb_t *sp)
{
@@ -3211,6 +3244,9 @@ qla2x00_start_sp(srb_t *sp)
case SRB_FXIOCB_BCMD:
qlafx00_fxdisc_iocb(sp, pkt);
break;
case SRB_NVME_LS:
qla_nvme_ls(sp, pkt);
break;
case SRB_ABT_CMD:
IS_QLAFX00(ha) ?
qlafx00_abort_iocb(sp, pkt) :

diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c

@@ -2828,6 +2828,21 @@ qla24xx_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
sp->done(sp, 0);
}
void qla24xx_nvme_ls4_iocb(scsi_qla_host_t *vha, struct pt_ls4_request *pkt,
struct req_que *req)
{
srb_t *sp;
const char func[] = "LS4_IOCB";
uint16_t comp_status;
sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
if (!sp)
return;
comp_status = le16_to_cpu(pkt->status);
sp->done(sp, comp_status);
}
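For reference, the LS completion path runs end to end as follows: the firmware posts a PT_LS4_REQUEST entry on the response queue, qla24xx_process_response_queue() (below) dispatches it here, the handle lookup recovers the srb, and sp->done (qla_nvme_sp_ls_done() in qla_nvme.c) stores the completion status and schedules ls_work, whose handler calls fd->done() back into the nvme-fc transport.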
/**
* qla24xx_process_response_queue() - Process response queue entries.
* @ha: SCSI driver HA context
@@ -2901,6 +2916,10 @@ process_err:
case CTIO_CRC2:
qlt_response_pkt_all_vps(vha, rsp, (response_t *)pkt);
break;
case PT_LS4_REQUEST:
qla24xx_nvme_ls4_iocb(vha, (struct pt_ls4_request *)pkt,
rsp->req);
break;
case NOTIFY_ACK_TYPE:
if (pkt->handle == QLA_TGT_SKIP_HANDLE)
qlt_response_pkt_all_vps(vha, rsp,

diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c

@@ -560,6 +560,8 @@ qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr,
}
#define EXTENDED_BB_CREDITS BIT_0
#define NVME_ENABLE_FLAG BIT_3
/*
* qla2x00_execute_fw
* Start adapter firmware.
@@ -601,6 +603,9 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
} else
mcp->mb[4] = 0;
if (ql2xnvmeenable && IS_QLA27XX(ha))
mcp->mb[4] |= NVME_ENABLE_FLAG;
if (ha->flags.exlogins_enabled)
mcp->mb[4] |= ENABLE_EXTENDED_LOGIN;
@@ -941,6 +946,22 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha)
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1191,
"%s: Firmware supports Exchange Offload 0x%x\n",
__func__, ha->fw_attributes_h);
/* bit 26 of fw_attributes, i.e. bit 10 (0x400) of fw_attributes_h */
if ((ha->fw_attributes_h & 0x400) && ql2xnvmeenable) {
struct init_cb_24xx *icb;
icb = (struct init_cb_24xx *)ha->init_cb;
/*
* fw supports nvme and driver load
* parameter requested nvme
*/
vha->flags.nvme_enabled = 1;
icb->firmware_options_2 &= cpu_to_le32(~0xf);
ha->zio_mode = 0;
ha->zio_timer = 0;
}
}
if (IS_QLA27XX(ha)) {

diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
new file

@@ -0,0 +1,756 @@
/*
* QLogic Fibre Channel HBA Driver
* Copyright (c) 2003-2017 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
#include "qla_nvme.h"
#include "qla_def.h"
#include <linux/scatterlist.h>
#include <linux/delay.h>
#include <linux/nvme.h>
#include <linux/nvme-fc.h>
static struct nvme_fc_port_template qla_nvme_fc_transport;
static void qla_nvme_unregister_remote_port(struct work_struct *);
int qla_nvme_register_remote(scsi_qla_host_t *vha, fc_port_t *fcport)
{
#if (IS_ENABLED(CONFIG_NVME_FC))
struct nvme_rport *rport;
int ret;
if (fcport->nvme_flag & NVME_FLAG_REGISTERED)
return 0;
if (!vha->flags.nvme_enabled) {
ql_log(ql_log_info, vha, 0x2100,
"%s: Not registering target since Host NVME is not enabled\n",
__func__);
return 0;
}
if (!(fcport->nvme_prli_service_param &
(NVME_PRLI_SP_TARGET | NVME_PRLI_SP_DISCOVERY)))
return 0;
INIT_WORK(&fcport->nvme_del_work, qla_nvme_unregister_remote_port);
rport = kzalloc(sizeof(*rport), GFP_KERNEL);
if (!rport) {
ql_log(ql_log_warn, vha, 0x2101,
"%s: unable to alloc memory\n", __func__);
return -ENOMEM;
}
rport->req.port_name = wwn_to_u64(fcport->port_name);
rport->req.node_name = wwn_to_u64(fcport->node_name);
rport->req.port_role = 0;
if (fcport->nvme_prli_service_param & NVME_PRLI_SP_INITIATOR)
rport->req.port_role = FC_PORT_ROLE_NVME_INITIATOR;
if (fcport->nvme_prli_service_param & NVME_PRLI_SP_TARGET)
rport->req.port_role |= FC_PORT_ROLE_NVME_TARGET;
if (fcport->nvme_prli_service_param & NVME_PRLI_SP_DISCOVERY)
rport->req.port_role |= FC_PORT_ROLE_NVME_DISCOVERY;
rport->req.port_id = fcport->d_id.b24;
ql_log(ql_log_info, vha, 0x2102,
"%s: traddr=pn-0x%016llx:nn-0x%016llx PortID:%06x\n",
__func__, rport->req.port_name, rport->req.node_name,
rport->req.port_id);
ret = nvme_fc_register_remoteport(vha->nvme_local_port, &rport->req,
&fcport->nvme_remote_port);
if (ret) {
ql_log(ql_log_warn, vha, 0x212e,
"Failed to register remote port. Transport returned %d\n",
ret);
return ret;
}
fcport->nvme_remote_port->private = fcport;
fcport->nvme_flag |= NVME_FLAG_REGISTERED;
atomic_set(&fcport->nvme_ref_count, 1);
init_waitqueue_head(&fcport->nvme_waitQ);
rport->fcport = fcport;
list_add_tail(&rport->list, &vha->nvme_rport_list);
#endif
return 0;
}
/* Allocate a queue for NVMe traffic */
static int qla_nvme_alloc_queue(struct nvme_fc_local_port *lport, unsigned int qidx,
u16 qsize, void **handle)
{
struct scsi_qla_host *vha;
struct qla_hw_data *ha;
struct qla_qpair *qpair;
if (!qidx)
qidx++;
vha = (struct scsi_qla_host *)lport->private;
ha = vha->hw;
ql_log(ql_log_info, vha, 0x2104,
"%s: handle %p, idx =%d, qsize %d\n",
__func__, handle, qidx, qsize);
if (qidx > qla_nvme_fc_transport.max_hw_queues) {
ql_log(ql_log_warn, vha, 0x212f,
"%s: Illegal qidx=%d. Max=%d\n",
__func__, qidx, qla_nvme_fc_transport.max_hw_queues);
return -EINVAL;
}
if (ha->queue_pair_map[qidx]) {
*handle = ha->queue_pair_map[qidx];
ql_log(ql_log_info, vha, 0x2121,
"Returning existing qpair of %p for idx=%x\n",
*handle, qidx);
return 0;
}
ql_log(ql_log_warn, vha, 0xffff,
"allocating q for idx=%x w/o cpu mask\n", qidx);
qpair = qla2xxx_create_qpair(vha, 5, vha->vp_idx, true);
if (qpair == NULL) {
ql_log(ql_log_warn, vha, 0x2122,
"Failed to allocate qpair\n");
return -EINVAL;
}
*handle = qpair;
return 0;
}
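The handle produced here is opaque to the transport: whatever create_queue stores is handed back verbatim as hw_queue_handle on later fcp_io/poll_queue calls. A minimal sketch of that contract follows; the caller shown is hypothetical, since in reality the nvme-fc transport makes these calls itself when it builds an association.

/* Hypothetical caller illustrating the create_queue contract. */
void *handle = NULL;

/* qidx 0 is remapped to qpair index 1 above; qsize is advisory here. */
if (qla_nvme_alloc_queue(lport, 0, 32, &handle) == 0)
	/* handle now points at a struct qla_qpair and will be passed
	 * back unchanged as hw_queue_handle. */
	qla_nvme_poll(lport, handle);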
static void qla_nvme_sp_ls_done(void *ptr, int res)
{
srb_t *sp = ptr;
struct srb_iocb *nvme;
struct nvmefc_ls_req *fd;
struct nvme_private *priv;
if (atomic_read(&sp->ref_count) == 0) {
ql_log(ql_log_warn, sp->fcport->vha, 0x2123,
"SP reference-count to ZERO on LS_done -- sp=%p.\n", sp);
return;
}
if (!atomic_dec_and_test(&sp->ref_count))
return;
if (res)
res = -EINVAL;
nvme = &sp->u.iocb_cmd;
fd = nvme->u.nvme.desc;
priv = fd->private;
priv->comp_status = res;
schedule_work(&priv->ls_work);
/* work schedule doesn't need the sp */
qla2x00_rel_sp(sp);
}
static void qla_nvme_sp_done(void *ptr, int res)
{
srb_t *sp = ptr;
struct srb_iocb *nvme;
struct nvmefc_fcp_req *fd;
nvme = &sp->u.iocb_cmd;
fd = nvme->u.nvme.desc;
if (!atomic_dec_and_test(&sp->ref_count))
return;
if (!(sp->fcport->nvme_flag & NVME_FLAG_REGISTERED))
goto rel;
if (unlikely(nvme->u.nvme.comp_status || res))
fd->status = -EINVAL;
else
fd->status = 0;
fd->rcv_rsplen = nvme->u.nvme.rsp_pyld_len;
fd->done(fd);
rel:
qla2xxx_rel_qpair_sp(sp->qpair, sp);
}
static void qla_nvme_ls_abort(struct nvme_fc_local_port *lport,
struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
{
struct nvme_private *priv = fd->private;
fc_port_t *fcport = rport->private;
srb_t *sp = priv->sp;
int rval;
struct qla_hw_data *ha = fcport->vha->hw;
rval = ha->isp_ops->abort_command(sp);
if (rval != QLA_SUCCESS)
ql_log(ql_log_warn, fcport->vha, 0x2125,
"%s: failed to abort LS command for SP:%p rval=%x\n",
__func__, sp, rval);
ql_dbg(ql_dbg_io, fcport->vha, 0x212b,
"%s: aborted sp:%p on fcport:%p\n", __func__, sp, fcport);
}
static void qla_nvme_ls_complete(struct work_struct *work)
{
struct nvme_private *priv =
container_of(work, struct nvme_private, ls_work);
struct nvmefc_ls_req *fd = priv->fd;
fd->done(fd, priv->comp_status);
}
static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
{
fc_port_t *fcport = (fc_port_t *)rport->private;
struct srb_iocb *nvme;
struct nvme_private *priv = fd->private;
struct scsi_qla_host *vha;
int rval = QLA_FUNCTION_FAILED;
struct qla_hw_data *ha;
srb_t *sp;
if (!(fcport->nvme_flag & NVME_FLAG_REGISTERED))
return rval;
vha = fcport->vha;
ha = vha->hw;
/* Alloc SRB structure */
sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
if (!sp)
return rval;
sp->type = SRB_NVME_LS;
sp->name = "nvme_ls";
sp->done = qla_nvme_sp_ls_done;
atomic_set(&sp->ref_count, 1);
init_waitqueue_head(&sp->nvme_ls_waitQ);
nvme = &sp->u.iocb_cmd;
priv->sp = sp;
priv->fd = fd;
INIT_WORK(&priv->ls_work, qla_nvme_ls_complete);
nvme->u.nvme.desc = fd;
nvme->u.nvme.dir = 0;
nvme->u.nvme.dl = 0;
nvme->u.nvme.cmd_len = fd->rqstlen;
nvme->u.nvme.rsp_len = fd->rsplen;
nvme->u.nvme.rsp_dma = fd->rspdma;
nvme->u.nvme.timeout_sec = fd->timeout;
nvme->u.nvme.cmd_dma = dma_map_single(&ha->pdev->dev, fd->rqstaddr,
fd->rqstlen, DMA_TO_DEVICE);
dma_sync_single_for_device(&ha->pdev->dev, nvme->u.nvme.cmd_dma,
fd->rqstlen, DMA_TO_DEVICE);
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x700e,
"qla2x00_start_sp failed = %d\n", rval);
atomic_dec(&sp->ref_count);
wake_up(&sp->nvme_ls_waitQ);
return rval;
}
return rval;
}
static void qla_nvme_fcp_abort(struct nvme_fc_local_port *lport,
struct nvme_fc_remote_port *rport, void *hw_queue_handle,
struct nvmefc_fcp_req *fd)
{
struct nvme_private *priv = fd->private;
srb_t *sp = priv->sp;
int rval;
fc_port_t *fcport = rport->private;
struct qla_hw_data *ha = fcport->vha->hw;
rval = ha->isp_ops->abort_command(sp);
if (rval != QLA_SUCCESS)
ql_log(ql_log_warn, fcport->vha, 0x2127,
"%s: failed to abort command for SP:%p rval=%x\n",
__func__, sp, rval);
ql_dbg(ql_dbg_io, fcport->vha, 0x2126,
"%s: aborted sp:%p on fcport:%p\n", __func__, sp, fcport);
}
static void qla_nvme_poll(struct nvme_fc_local_port *lport, void *hw_queue_handle)
{
struct scsi_qla_host *vha = lport->private;
unsigned long flags;
struct qla_qpair *qpair = (struct qla_qpair *)hw_queue_handle;
/* Acquire ring specific lock */
spin_lock_irqsave(&qpair->qp_lock, flags);
qla24xx_process_response_queue(vha, qpair->rsp);
spin_unlock_irqrestore(&qpair->qp_lock, flags);
}
static int qla2x00_start_nvme_mq(srb_t *sp)
{
unsigned long flags;
uint32_t *clr_ptr;
uint32_t index;
uint32_t handle;
struct cmd_nvme *cmd_pkt;
uint16_t cnt, i;
uint16_t req_cnt;
uint16_t tot_dsds;
uint16_t avail_dsds;
uint32_t *cur_dsd;
struct req_que *req = NULL;
struct scsi_qla_host *vha = sp->fcport->vha;
struct qla_hw_data *ha = vha->hw;
struct qla_qpair *qpair = sp->qpair;
struct srb_iocb *nvme = &sp->u.iocb_cmd;
struct scatterlist *sgl, *sg;
struct nvmefc_fcp_req *fd = nvme->u.nvme.desc;
uint32_t rval = QLA_SUCCESS;
/* Setup qpair pointers */
req = qpair->req;
tot_dsds = fd->sg_cnt;
/* Acquire qpair specific lock */
spin_lock_irqsave(&qpair->qp_lock, flags);
/* Check for room in outstanding command list. */
handle = req->current_outstanding_cmd;
for (index = 1; index < req->num_outstanding_cmds; index++) {
handle++;
if (handle == req->num_outstanding_cmds)
handle = 1;
if (!req->outstanding_cmds[handle])
break;
}
if (index == req->num_outstanding_cmds) {
rval = -1;
goto queuing_error;
}
req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
if (req->cnt < (req_cnt + 2)) {
cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
RD_REG_DWORD_RELAXED(req->req_q_out);
if (req->ring_index < cnt)
req->cnt = cnt - req->ring_index;
else
req->cnt = req->length - (req->ring_index - cnt);
if (req->cnt < (req_cnt + 2)) {
rval = -1;
goto queuing_error;
}
}
if (unlikely(!fd->sqid)) {
struct nvme_fc_cmd_iu *cmd = fd->cmdaddr;
if (cmd->sqe.common.opcode == nvme_admin_async_event) {
nvme->u.nvme.aen_op = 1;
atomic_inc(&vha->nvme_active_aen_cnt);
}
}
/* Build command packet. */
req->current_outstanding_cmd = handle;
req->outstanding_cmds[handle] = sp;
sp->handle = handle;
req->cnt -= req_cnt;
cmd_pkt = (struct cmd_nvme *)req->ring_ptr;
cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
/* Zero out remaining portion of packet. */
clr_ptr = (uint32_t *)cmd_pkt + 2;
memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
cmd_pkt->entry_status = 0;
/* Update entry type to indicate Command NVME IOCB */
cmd_pkt->entry_type = COMMAND_NVME;
/* No data transfer. How do we check buffer len == 0? */
if (fd->io_dir == NVMEFC_FCP_READ) {
cmd_pkt->control_flags =
cpu_to_le16(CF_READ_DATA | CF_NVME_ENABLE);
vha->qla_stats.input_bytes += fd->payload_length;
vha->qla_stats.input_requests++;
} else if (fd->io_dir == NVMEFC_FCP_WRITE) {
cmd_pkt->control_flags =
cpu_to_le16(CF_WRITE_DATA | CF_NVME_ENABLE);
vha->qla_stats.output_bytes += fd->payload_length;
vha->qla_stats.output_requests++;
} else if (fd->io_dir == 0) {
cmd_pkt->control_flags = cpu_to_le16(CF_NVME_ENABLE);
}
/* Set NPORT-ID */
cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
/* NVME RSP IU */
cmd_pkt->nvme_rsp_dsd_len = cpu_to_le16(fd->rsplen);
cmd_pkt->nvme_rsp_dseg_address[0] = cpu_to_le32(LSD(fd->rspdma));
cmd_pkt->nvme_rsp_dseg_address[1] = cpu_to_le32(MSD(fd->rspdma));
/* NVMe CMND IU */
cmd_pkt->nvme_cmnd_dseg_len = cpu_to_le16(fd->cmdlen);
cmd_pkt->nvme_cmnd_dseg_address[0] = cpu_to_le32(LSD(fd->cmddma));
cmd_pkt->nvme_cmnd_dseg_address[1] = cpu_to_le32(MSD(fd->cmddma));
cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
cmd_pkt->byte_count = cpu_to_le32(fd->payload_length);
/* One DSD is available in the Command Type NVME IOCB */
avail_dsds = 1;
cur_dsd = (uint32_t *)&cmd_pkt->nvme_data_dseg_address[0];
sgl = fd->first_sgl;
/* Load data segments */
for_each_sg(sgl, sg, tot_dsds, i) {
dma_addr_t sle_dma;
cont_a64_entry_t *cont_pkt;
/* Allocate additional continuation packets? */
if (avail_dsds == 0) {
/*
* Five DSDs are available in the Continuation
* Type 1 IOCB.
*/
/* Adjust ring index */
req->ring_index++;
if (req->ring_index == req->length) {
req->ring_index = 0;
req->ring_ptr = req->ring;
} else {
req->ring_ptr++;
}
cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
cont_pkt->entry_type = cpu_to_le32(CONTINUE_A64_TYPE);
cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
avail_dsds = 5;
}
sle_dma = sg_dma_address(sg);
*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
avail_dsds--;
}
/* Set total entry count. */
cmd_pkt->entry_count = (uint8_t)req_cnt;
wmb();
/* Adjust ring index. */
req->ring_index++;
if (req->ring_index == req->length) {
req->ring_index = 0;
req->ring_ptr = req->ring;
} else {
req->ring_ptr++;
}
/* Set chip new ring index. */
WRT_REG_DWORD(req->req_q_in, req->ring_index);
queuing_error:
spin_unlock_irqrestore(&qpair->qp_lock, flags);
return rval;
}
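The free-slot computation above is the stock qla2xxx ring-room pattern: req->cnt caches an estimate of the free entries and is refreshed from the consumer index only when it looks insufficient. A small sketch of the arithmetic follows; ring_room() is a hypothetical helper mirroring the logic above, not part of the patch.

/* Hypothetical helper mirroring the ring-room arithmetic above.
 * ring_index: next producer slot; out: consumer index read from the
 * shadow register or req_q_out; length: total ring entries. */
static uint16_t ring_room(uint16_t ring_index, uint16_t out, uint16_t length)
{
	if (ring_index < out)
		return out - ring_index;	/* free run up to the consumer */
	return length - (ring_index - out);	/* wrapped: rest of the ring */
}

/* Example: length = 2048, ring_index = 2000, out = 100
 *   -> 2048 - (2000 - 100) = 148 free entries.
 * The "req_cnt + 2" test keeps headroom so the ring never fills
 * completely and the in/out indices stay unambiguous. */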
/* Post a command */
static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
struct nvme_fc_remote_port *rport, void *hw_queue_handle,
struct nvmefc_fcp_req *fd)
{
fc_port_t *fcport;
struct srb_iocb *nvme;
struct scsi_qla_host *vha;
int rval = QLA_FUNCTION_FAILED;
srb_t *sp;
struct qla_qpair *qpair = (struct qla_qpair *)hw_queue_handle;
struct nvme_private *priv;
if (!fd) {
ql_log(ql_log_warn, NULL, 0x2134, "No NVMe FCP request\n");
return rval;
}
priv = fd->private;
fcport = (fc_port_t *)rport->private;
if (!fcport) {
ql_log(ql_log_warn, NULL, 0x210e, "No fcport ptr\n");
return rval;
}
vha = fcport->vha;
if ((!qpair) || (!(fcport->nvme_flag & NVME_FLAG_REGISTERED)))
return -EBUSY;
/* Alloc SRB structure */
sp = qla2xxx_get_qpair_sp(qpair, fcport, GFP_ATOMIC);
if (!sp)
return -EIO;
atomic_set(&sp->ref_count, 1);
init_waitqueue_head(&sp->nvme_ls_waitQ);
priv->sp = sp;
sp->type = SRB_NVME_CMD;
sp->name = "nvme_cmd";
sp->done = qla_nvme_sp_done;
sp->qpair = qpair;
nvme = &sp->u.iocb_cmd;
nvme->u.nvme.desc = fd;
rval = qla2x00_start_nvme_mq(sp);
if (rval != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x212d,
"qla2x00_start_nvme_mq failed = %d\n", rval);
atomic_dec(&sp->ref_count);
wake_up(&sp->nvme_ls_waitQ);
return -EIO;
}
return rval;
}
static void qla_nvme_localport_delete(struct nvme_fc_local_port *lport)
{
struct scsi_qla_host *vha = lport->private;
atomic_dec(&vha->nvme_ref_count);
wake_up_all(&vha->nvme_waitQ);
ql_log(ql_log_info, vha, 0x210f,
"localport delete of %p completed.\n", vha->nvme_local_port);
vha->nvme_local_port = NULL;
}
static void qla_nvme_remoteport_delete(struct nvme_fc_remote_port *rport)
{
fc_port_t *fcport;
struct nvme_rport *r_port, *trport;
fcport = (fc_port_t *)rport->private;
fcport->nvme_remote_port = NULL;
fcport->nvme_flag &= ~NVME_FLAG_REGISTERED;
atomic_dec(&fcport->nvme_ref_count);
wake_up_all(&fcport->nvme_waitQ);
list_for_each_entry_safe(r_port, trport,
&fcport->vha->nvme_rport_list, list) {
if (r_port->fcport == fcport) {
list_del(&r_port->list);
kfree(r_port);
break;
}
}
ql_log(ql_log_info, fcport->vha, 0x2110,
"remoteport_delete of %p completed.\n", fcport);
}
static struct nvme_fc_port_template qla_nvme_fc_transport = {
.localport_delete = qla_nvme_localport_delete,
.remoteport_delete = qla_nvme_remoteport_delete,
.create_queue = qla_nvme_alloc_queue,
.delete_queue = NULL,
.ls_req = qla_nvme_ls_req,
.ls_abort = qla_nvme_ls_abort,
.fcp_io = qla_nvme_post_cmd,
.fcp_abort = qla_nvme_fcp_abort,
.poll_queue = qla_nvme_poll,
.max_hw_queues = 8,
.max_sgl_segments = 128,
.max_dif_sgl_segments = 64,
.dma_boundary = 0xFFFFFFFF,
.local_priv_sz = 8,
.remote_priv_sz = 0,
.lsrqst_priv_sz = sizeof(struct nvme_private),
.fcprqst_priv_sz = sizeof(struct nvme_private),
};
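Two sizing details in this template do real work: lsrqst_priv_sz and fcprqst_priv_sz make the transport allocate sizeof(struct nvme_private) behind each request's fd->private, which is where qla_nvme_ls_req() and qla_nvme_post_cmd() stash the srb pointer without any lookup table; remote_priv_sz is 0 because the driver instead hangs the fcport off nvme_remote_port->private at registration time. max_hw_queues is only a ceiling here; qla_nvme_register_hba() clamps it to the number of request queues the HBA actually exposes.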
#define NVME_ABORT_POLLING_PERIOD 2
static int qla_nvme_wait_on_command(srb_t *sp)
{
int ret = QLA_SUCCESS;
wait_event_timeout(sp->nvme_ls_waitQ, (atomic_read(&sp->ref_count) <= 1),
NVME_ABORT_POLLING_PERIOD*HZ);
if (atomic_read(&sp->ref_count) > 1)
ret = QLA_FUNCTION_FAILED;
return ret;
}
static int qla_nvme_wait_on_rport_del(fc_port_t *fcport)
{
int ret = QLA_SUCCESS;
wait_event_timeout(fcport->nvme_waitQ,
!atomic_read(&fcport->nvme_ref_count),
NVME_ABORT_POLLING_PERIOD*HZ);
if (atomic_read(&fcport->nvme_ref_count)) {
ret = QLA_FUNCTION_FAILED;
ql_log(ql_log_info, fcport->vha, 0x2111,
"timed out waiting for fcport=%p to delete\n", fcport);
}
return ret;
}
void qla_nvme_abort(struct qla_hw_data *ha, srb_t *sp)
{
int rval;
rval = ha->isp_ops->abort_command(sp);
if (!rval) {
if (!qla_nvme_wait_on_command(sp))
ql_log(ql_log_warn, NULL, 0x2112,
"nvme_wait_on_comand timed out waiting on sp=%p\n",
sp);
}
}
static void qla_nvme_abort_all(fc_port_t *fcport)
{
int que, cnt;
unsigned long flags;
srb_t *sp;
struct qla_hw_data *ha = fcport->vha->hw;
struct req_que *req;
spin_lock_irqsave(&ha->hardware_lock, flags);
for (que = 0; que < ha->max_req_queues; que++) {
req = ha->req_q_map[que];
if (!req)
continue;
if (!req->outstanding_cmds)
continue;
for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
sp = req->outstanding_cmds[cnt];
if ((sp) && ((sp->type == SRB_NVME_CMD) ||
(sp->type == SRB_NVME_LS)) &&
(sp->fcport == fcport)) {
atomic_inc(&sp->ref_count);
spin_unlock_irqrestore(&ha->hardware_lock,
flags);
qla_nvme_abort(ha, sp);
spin_lock_irqsave(&ha->hardware_lock, flags);
req->outstanding_cmds[cnt] = NULL;
sp->done(sp, 1);
}
}
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
static void qla_nvme_unregister_remote_port(struct work_struct *work)
{
#if (IS_ENABLED(CONFIG_NVME_FC))
struct fc_port *fcport = container_of(work, struct fc_port,
nvme_del_work);
struct nvme_rport *rport, *trport;
list_for_each_entry_safe(rport, trport,
&fcport->vha->nvme_rport_list, list) {
if (rport->fcport == fcport) {
ql_log(ql_log_info, fcport->vha, 0x2113,
"%s: fcport=%p\n", __func__, fcport);
nvme_fc_unregister_remoteport(
fcport->nvme_remote_port);
}
}
#endif
}
void qla_nvme_delete(scsi_qla_host_t *vha)
{
#if (IS_ENABLED(CONFIG_NVME_FC))
struct nvme_rport *rport, *trport;
fc_port_t *fcport;
int nv_ret;
list_for_each_entry_safe(rport, trport, &vha->nvme_rport_list, list) {
fcport = rport->fcport;
ql_log(ql_log_info, fcport->vha, 0x2114, "%s: fcport=%p\n",
__func__, fcport);
nvme_fc_unregister_remoteport(fcport->nvme_remote_port);
qla_nvme_wait_on_rport_del(fcport);
qla_nvme_abort_all(fcport);
}
if (vha->nvme_local_port) {
nv_ret = nvme_fc_unregister_localport(vha->nvme_local_port);
if (nv_ret == 0)
ql_log(ql_log_info, vha, 0x2116,
"unregistered localport=%p\n",
vha->nvme_local_port);
else
ql_log(ql_log_info, vha, 0x2115,
"Unregister of localport failed\n");
}
#endif
}
void qla_nvme_register_hba(scsi_qla_host_t *vha)
{
#if (IS_ENABLED(CONFIG_NVME_FC))
struct nvme_fc_port_template *tmpl;
struct qla_hw_data *ha;
struct nvme_fc_port_info pinfo;
int ret;
ha = vha->hw;
tmpl = &qla_nvme_fc_transport;
WARN_ON(vha->nvme_local_port);
WARN_ON(ha->max_req_queues < 3);
qla_nvme_fc_transport.max_hw_queues =
min((uint8_t)(qla_nvme_fc_transport.max_hw_queues),
(uint8_t)(ha->max_req_queues - 2));
pinfo.node_name = wwn_to_u64(vha->node_name);
pinfo.port_name = wwn_to_u64(vha->port_name);
pinfo.port_role = FC_PORT_ROLE_NVME_INITIATOR;
pinfo.port_id = vha->d_id.b24;
ql_log(ql_log_info, vha, 0xffff,
"register_localport: host-traddr=pn-0x%llx:nn-0x%llx on portID:%x\n",
pinfo.port_name, pinfo.node_name, pinfo.port_id);
qla_nvme_fc_transport.dma_boundary = vha->host->dma_boundary;
ret = nvme_fc_register_localport(&pinfo, tmpl,
get_device(&ha->pdev->dev), &vha->nvme_local_port);
if (ret) {
ql_log(ql_log_warn, vha, 0xffff,
"register_localport failed: ret=%x\n", ret);
return;
}
atomic_set(&vha->nvme_ref_count, 1);
vha->nvme_local_port->private = vha;
init_waitqueue_head(&vha->nvme_waitQ);
#endif
}

diff --git a/drivers/scsi/qla2xxx/qla_nvme.h b/drivers/scsi/qla2xxx/qla_nvme.h
new file

@@ -0,0 +1,132 @@
/*
* QLogic Fibre Channel HBA Driver
* Copyright (c) 2003-2017 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
#ifndef __QLA_NVME_H
#define __QLA_NVME_H
#include <linux/blk-mq.h>
#include <uapi/scsi/fc/fc_fs.h>
#include <uapi/scsi/fc/fc_els.h>
#include <linux/nvme-fc-driver.h>
#define NVME_ATIO_CMD_OFF 32
#define NVME_FIRST_PACKET_CMDLEN (64 - NVME_ATIO_CMD_OFF)
#define Q2T_NVME_NUM_TAGS 2048
#define QLA_MAX_FC_SEGMENTS 64
struct srb;
struct nvme_private {
struct srb *sp;
struct nvmefc_ls_req *fd;
struct work_struct ls_work;
int comp_status;
};
struct nvme_rport {
struct nvme_fc_port_info req;
struct list_head list;
struct fc_port *fcport;
};
#define COMMAND_NVME 0x88 /* Command Type FC-NVMe IOCB */
struct cmd_nvme {
uint8_t entry_type; /* Entry type. */
uint8_t entry_count; /* Entry count. */
uint8_t sys_define; /* System defined. */
uint8_t entry_status; /* Entry Status. */
uint32_t handle; /* System handle. */
uint16_t nport_handle; /* N_PORT handle. */
uint16_t timeout; /* Command timeout. */
uint16_t dseg_count; /* Data segment count. */
uint16_t nvme_rsp_dsd_len; /* NVMe RSP DSD length */
uint64_t rsvd;
uint16_t control_flags; /* Control Flags */
#define CF_NVME_ENABLE BIT_9
#define CF_DIF_SEG_DESCR_ENABLE BIT_3
#define CF_DATA_SEG_DESCR_ENABLE BIT_2
#define CF_READ_DATA BIT_1
#define CF_WRITE_DATA BIT_0
uint16_t nvme_cmnd_dseg_len; /* Data segment length. */
uint32_t nvme_cmnd_dseg_address[2]; /* Data segment address. */
uint32_t nvme_rsp_dseg_address[2]; /* Data segment address. */
uint32_t byte_count; /* Total byte count. */
uint8_t port_id[3]; /* PortID of destination port. */
uint8_t vp_index;
uint32_t nvme_data_dseg_address[2]; /* Data segment address. */
uint32_t nvme_data_dseg_len; /* Data segment length. */
};
#define PT_LS4_REQUEST 0x89 /* Link Service pass-through IOCB (request) */
struct pt_ls4_request {
uint8_t entry_type;
uint8_t entry_count;
uint8_t sys_define;
uint8_t entry_status;
uint32_t handle;
uint16_t status;
uint16_t nport_handle;
uint16_t tx_dseg_count;
uint8_t vp_index;
uint8_t rsvd;
uint16_t timeout;
uint16_t control_flags;
#define CF_LS4_SHIFT 13
#define CF_LS4_ORIGINATOR 0
#define CF_LS4_RESPONDER 1
#define CF_LS4_RESPONDER_TERM 2
uint16_t rx_dseg_count;
uint16_t rsvd2;
uint32_t exchange_address;
uint32_t rsvd3;
uint32_t rx_byte_count;
uint32_t tx_byte_count;
uint32_t dseg0_address[2];
uint32_t dseg0_len;
uint32_t dseg1_address[2];
uint32_t dseg1_len;
};
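Both of these IOCBs are laid out to fill exactly one 64-byte request-queue entry, which is what lets qla2x00_start_nvme_mq() cast req->ring_ptr directly and memset REQUEST_ENTRY_SIZE - 8 past the first two dwords. A compile-time check would keep that invariant honest; the sketch below is illustrative only (qla_nvme_check_iocb_sizes() is hypothetical and these asserts are not in the patch).

/* Sketch only. BUILD_BUG_ON() from <linux/bug.h> needs function scope,
 * so the checks would sit in an init path. */
static void qla_nvme_check_iocb_sizes(void)
{
	BUILD_BUG_ON(sizeof(struct cmd_nvme) != 64);
	BUILD_BUG_ON(sizeof(struct pt_ls4_request) != 64);
}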
#define PT_LS4_UNSOL 0x56 /* pass-up unsolicited rec FC-NVMe request */
struct pt_ls4_rx_unsol {
uint8_t entry_type;
uint8_t entry_count;
uint16_t rsvd0;
uint16_t rsvd1;
uint8_t vp_index;
uint8_t rsvd2;
uint16_t rsvd3;
uint16_t nport_handle;
uint16_t frame_size;
uint16_t rsvd4;
uint32_t exchange_address;
uint8_t d_id[3];
uint8_t r_ctl;
uint8_t s_id[3];
uint8_t cs_ctl;
uint8_t f_ctl[3];
uint8_t type;
uint16_t seq_cnt;
uint8_t df_ctl;
uint8_t seq_id;
uint16_t rx_id;
uint16_t ox_id;
uint32_t param;
uint32_t desc0;
#define PT_LS4_PAYLOAD_OFFSET 0x2c
#define PT_LS4_FIRST_PACKET_LEN 20
uint32_t desc_len;
uint32_t payload[3];
};
#endif

diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c

@@ -120,7 +120,11 @@ MODULE_PARM_DESC(ql2xmaxqdepth,
"Maximum queue depth to set for each LUN. "
"Default is 32.");
#if (IS_ENABLED(CONFIG_NVME_FC))
int ql2xenabledif;
#else
int ql2xenabledif = 2;
#endif
module_param(ql2xenabledif, int, S_IRUGO);
MODULE_PARM_DESC(ql2xenabledif,
" Enable T10-CRC-DIF:\n"
@@ -129,6 +133,16 @@ MODULE_PARM_DESC(ql2xenabledif,
" 1 -- Enable DIF for all types\n"
" 2 -- Enable DIF for all types, except Type 0.\n");
#if (IS_ENABLED(CONFIG_NVME_FC))
int ql2xnvmeenable = 1;
#else
int ql2xnvmeenable;
#endif
module_param(ql2xnvmeenable, int, 0644);
MODULE_PARM_DESC(ql2xnvmeenable,
"Enables NVME support. "
"0 - no NVMe. Default is Y");
int ql2xenablehba_err_chk = 2;
module_param(ql2xenablehba_err_chk, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xenablehba_err_chk,
@@ -267,6 +281,7 @@ static void qla2x00_clear_drv_active(struct qla_hw_data *);
static void qla2x00_free_device(scsi_qla_host_t *);
static void qla83xx_disable_laser(scsi_qla_host_t *vha);
static int qla2xxx_map_queues(struct Scsi_Host *shost);
static void qla2x00_destroy_deferred_work(struct qla_hw_data *);
struct scsi_host_template qla2xxx_driver_template = {
.module = THIS_MODULE,
@@ -695,7 +710,7 @@ qla2x00_sp_free_dma(void *ptr)
}
end:
-if (sp->type != SRB_NVME_CMD) {
+if ((sp->type != SRB_NVME_CMD) && (sp->type != SRB_NVME_LS)) {
CMD_SP(cmd) = NULL;
qla2x00_rel_sp(sp);
}
@@ -1700,15 +1715,23 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
if (sp) {
req->outstanding_cmds[cnt] = NULL;
if (sp->cmd_type == TYPE_SRB) {
-/*
- * Don't abort commands in adapter
- * during EEH recovery as it's not
- * accessible/responding.
- */
-if (GET_CMD_SP(sp) &&
+if ((sp->type == SRB_NVME_CMD) ||
+(sp->type == SRB_NVME_LS)) {
+sp_get(sp);
+spin_unlock_irqrestore(
+&ha->hardware_lock, flags);
+qla_nvme_abort(ha, sp);
+spin_lock_irqsave(
+&ha->hardware_lock, flags);
+} else if (GET_CMD_SP(sp) &&
!ha->flags.eeh_busy &&
(sp->type == SRB_SCSI_CMD)) {
+/*
+ * Don't abort commands in
+ * adapter during EEH
+ * recovery as it's not
+ * accessible/responding.
+ *
+ * Get a reference to the sp
+ * and drop the lock. The
+ * reference ensures this
@@ -3534,6 +3557,9 @@ qla2x00_remove_one(struct pci_dev *pdev)
return;
set_bit(UNLOADING, &base_vha->dpc_flags);
qla_nvme_delete(base_vha);
dma_free_coherent(&ha->pdev->dev,
base_vha->gnl.size, base_vha->gnl.l, base_vha->gnl.ldma);