scsi: qedf: Add QLogic FastLinQ offload FCoE driver framework.

The QLogic FastLinQ Driver for FCoE (qedf) is the FCoE specific module
for 41000 Series Converged Network Adapters by QLogic. This patch
consists of the following changes:

- MAINTAINERS, Makefile and Kconfig changes for qedf
- PCI driver registration
- libfc/fcoe host level initialization
- SCSI host template initialization and callbacks
- Debugfs and log level infrastructure
- Link handling
- Firmware interface structures
- QED core module initialization
- Light L2 interface callbacks
- I/O request initialization
- Firmware I/O completion handling
- Firmware ELS request/response handling
- FIP request/response handled by the driver itself

Signed-off-by: Nilesh Javali <nilesh.javali@cavium.com>
Signed-off-by: Manish Rangankar <manish.rangankar@cavium.com>
Signed-off-by: Saurav Kashyap <saurav.kashyap@cavium.com>
Signed-off-by: Arun Easi <arun.easi@cavium.com>
Signed-off-by: Chad Dupuis <chad.dupuis@cavium.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Author: Dupuis, Chad <chad.dupuis@cavium.com>
Date: 2017-02-15 06:28:23 -08:00
Committed by: Martin K. Petersen
commit 61d8658b4a (parent 67f2db8792)
16 changed files with 8816 additions and 0 deletions

MAINTAINERS
@@ -10242,6 +10242,12 @@ L: linux-scsi@vger.kernel.org
S: Supported
F: drivers/scsi/qedi/
QLOGIC QL41xxx FCOE DRIVER
M: QLogic-Storage-Upstream@cavium.com
L: linux-scsi@vger.kernel.org
S: Supported
F: drivers/scsi/qedf/
QNX4 FILESYSTEM
M: Anders Larsen <al@alarsen.net>
W: http://www.alarsen.net/linux/qnx4fs/

drivers/scsi/Kconfig
@@ -1235,6 +1235,7 @@ config SCSI_QLOGICPTI
source "drivers/scsi/qla2xxx/Kconfig"
source "drivers/scsi/qla4xxx/Kconfig"
source "drivers/scsi/qedi/Kconfig"
source "drivers/scsi/qedf/Kconfig"
config SCSI_LPFC
tristate "Emulex LightPulse Fibre Channel Support"

drivers/scsi/Makefile
@@ -41,6 +41,7 @@ obj-$(CONFIG_FCOE) += fcoe/
obj-$(CONFIG_FCOE_FNIC) += fnic/
obj-$(CONFIG_SCSI_SNIC) += snic/
obj-$(CONFIG_SCSI_BNX2X_FCOE) += libfc/ fcoe/ bnx2fc/
obj-$(CONFIG_QEDF) += qedf/
obj-$(CONFIG_ISCSI_TCP) += libiscsi.o libiscsi_tcp.o iscsi_tcp.o
obj-$(CONFIG_INFINIBAND_ISER) += libiscsi.o
obj-$(CONFIG_ISCSI_BOOT_SYSFS) += iscsi_boot_sysfs.o

drivers/scsi/qedf/Kconfig (new file, 11 lines)
@@ -0,0 +1,11 @@
config QEDF
tristate "QLogic QEDF 25/40/100Gb FCoE Initiator Driver Support"
depends on PCI && SCSI
depends on QED
depends on LIBFC
depends on LIBFCOE
select QED_LL2
select QED_FCOE
---help---
This driver supports FCoE offload for the QLogic FastLinQ
41000 Series Converged Network Adapters.
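A minimal sketch of the resulting kernel configuration, assuming the QED core driver is also enabled (QED_LL2 and QED_FCOE are pulled in by the select statements above):

CONFIG_QED=m
CONFIG_QED_LL2=y
CONFIG_QED_FCOE=y
CONFIG_QEDF=m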

drivers/scsi/qedf/Makefile (new file, 5 lines)
@@ -0,0 +1,5 @@
obj-$(CONFIG_QEDF) := qedf.o
qedf-y = qedf_dbg.o qedf_main.o qedf_io.o qedf_fip.o \
qedf_attr.o qedf_els.o
qedf-$(CONFIG_DEBUG_FS) += qedf_debugfs.o

drivers/scsi/qedf/qedf.h (new file, 545 lines)
@@ -0,0 +1,545 @@
/*
* QLogic FCoE Offload Driver
* Copyright (c) 2016 Cavium Inc.
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
* this source tree.
*/
#ifndef _QEDFC_H_
#define _QEDFC_H_
#include <scsi/libfcoe.h>
#include <scsi/libfc.h>
#include <scsi/fc/fc_fip.h>
#include <scsi/fc/fc_fc2.h>
#include <scsi/scsi_tcq.h>
#include <scsi/fc_encode.h>
#include <linux/version.h>
/* qedf_hsi.h needs to be included before any qed includes */
#include "qedf_hsi.h"
#include <linux/qed/qed_if.h>
#include <linux/qed/qed_fcoe_if.h>
#include <linux/qed/qed_ll2_if.h>
#include "qedf_version.h"
#include "qedf_dbg.h"
/* Helpers to extract upper and lower 32-bits of pointer */
#define U64_HI(val) ((u32)(((u64)(val)) >> 32))
#define U64_LO(val) ((u32)(((u64)(val)) & 0xffffffff))
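/*
 * Illustrative use only (the sge_addr field names below are hypothetical):
 * firmware structures carry DMA addresses as two 32-bit halves, e.g.
 *
 *	sge->sge_addr.hi = U64_HI(buf_dma);
 *	sge->sge_addr.lo = U64_LO(buf_dma);
 */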
#define QEDF_DESCR "QLogic FCoE Offload Driver"
#define QEDF_MODULE_NAME "qedf"
#define QEDF_MIN_XID 0
#define QEDF_MAX_SCSI_XID (NUM_TASKS_PER_CONNECTION - 1)
#define QEDF_MAX_ELS_XID 4095
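/*
 * Sketch of the exchange ID layout implied by the limits above: XIDs
 * 0 .. QEDF_MAX_SCSI_XID are used for offloaded SCSI tasks, and the range
 * above that, up to QEDF_MAX_ELS_XID, for middle path (ELS) exchanges.
 */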
#define QEDF_FLOGI_RETRY_CNT 3
#define QEDF_RPORT_RETRY_CNT 255
#define QEDF_MAX_SESSIONS 1024
#define QEDF_MAX_PAYLOAD 2048
#define QEDF_MAX_BDS_PER_CMD 256
#define QEDF_MAX_BD_LEN 0xffff
#define QEDF_BD_SPLIT_SZ 0x1000
#define QEDF_PAGE_SIZE 4096
#define QED_HW_DMA_BOUNDARY 0xfff
#define QEDF_MAX_SGLEN_FOR_CACHESGL ((1U << 16) - 1)
#define QEDF_MFS (QEDF_MAX_PAYLOAD + \
sizeof(struct fc_frame_header))
#define QEDF_MAX_NPIV 64
#define QEDF_TM_TIMEOUT 10
#define QEDF_ABORT_TIMEOUT 10
#define QEDF_CLEANUP_TIMEOUT 10
#define QEDF_MAX_CDB_LEN 16
#define UPSTREAM_REMOVE 1
#define UPSTREAM_KEEP 1
struct qedf_mp_req {
uint8_t tm_flags;
uint32_t req_len;
void *req_buf;
dma_addr_t req_buf_dma;
struct fcoe_sge *mp_req_bd;
dma_addr_t mp_req_bd_dma;
struct fc_frame_header req_fc_hdr;
uint32_t resp_len;
void *resp_buf;
dma_addr_t resp_buf_dma;
struct fcoe_sge *mp_resp_bd;
dma_addr_t mp_resp_bd_dma;
struct fc_frame_header resp_fc_hdr;
};
struct qedf_els_cb_arg {
struct qedf_ioreq *aborted_io_req;
struct qedf_ioreq *io_req;
u8 op; /* Used to keep track of ELS op */
uint16_t l2_oxid;
u32 offset; /* Used for sequence cleanup */
u8 r_ctl; /* Used for sequence cleanup */
};
enum qedf_ioreq_event {
QEDF_IOREQ_EV_ABORT_SUCCESS,
QEDF_IOREQ_EV_ABORT_FAILED,
QEDF_IOREQ_EV_SEND_RRQ,
QEDF_IOREQ_EV_ELS_TMO,
QEDF_IOREQ_EV_ELS_ERR_DETECT,
QEDF_IOREQ_EV_ELS_FLUSH,
QEDF_IOREQ_EV_CLEANUP_SUCCESS,
QEDF_IOREQ_EV_CLEANUP_FAILED,
};
#define FC_GOOD 0
#define FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER (0x1<<2)
#define FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER (0x1<<3)
#define CMD_SCSI_STATUS(Cmnd) ((Cmnd)->SCp.Status)
#define FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID (0x1<<0)
#define FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID (0x1<<1)
struct qedf_ioreq {
struct list_head link;
uint16_t xid;
struct scsi_cmnd *sc_cmd;
bool use_slowpath; /* Use slow SGL for this I/O */
#define QEDF_SCSI_CMD 1
#define QEDF_TASK_MGMT_CMD 2
#define QEDF_ABTS 3
#define QEDF_ELS 4
#define QEDF_CLEANUP 5
#define QEDF_SEQ_CLEANUP 6
u8 cmd_type;
#define QEDF_CMD_OUTSTANDING 0x0
#define QEDF_CMD_IN_ABORT 0x1
#define QEDF_CMD_IN_CLEANUP 0x2
#define QEDF_CMD_SRR_SENT 0x3
u8 io_req_flags;
struct qedf_rport *fcport;
unsigned long flags;
enum qedf_ioreq_event event;
size_t data_xfer_len;
struct kref refcount;
struct qedf_cmd_mgr *cmd_mgr;
struct io_bdt *bd_tbl;
struct delayed_work timeout_work;
struct completion tm_done;
struct completion abts_done;
struct fcoe_task_context *task;
int idx;
/*
* Need to allocate enough room for both the sense data and the FCP
* response data, which has a max length of 8 bytes according to the spec.
*/
#define QEDF_SCSI_SENSE_BUFFERSIZE (SCSI_SENSE_BUFFERSIZE + 8)
uint8_t *sense_buffer;
dma_addr_t sense_buffer_dma;
u32 fcp_resid;
u32 fcp_rsp_len;
u32 fcp_sns_len;
u8 cdb_status;
u8 fcp_status;
u8 fcp_rsp_code;
u8 scsi_comp_flags;
#define QEDF_MAX_REUSE 0xfff
u16 reuse_count;
struct qedf_mp_req mp_req;
void (*cb_func)(struct qedf_els_cb_arg *cb_arg);
struct qedf_els_cb_arg *cb_arg;
int fp_idx;
unsigned int cpu;
unsigned int int_cpu;
#define QEDF_IOREQ_SLOW_SGE 0
#define QEDF_IOREQ_SINGLE_SGE 1
#define QEDF_IOREQ_FAST_SGE 2
u8 sge_type;
struct delayed_work rrq_work;
/* Used for sequence level recovery; i.e. REC/SRR */
uint32_t rx_buf_off;
uint32_t tx_buf_off;
uint32_t rx_id;
uint32_t task_retry_identifier;
/*
* Used to tell if we need to return a SCSI command
* during some form of error processing.
*/
bool return_scsi_cmd_on_abts;
};
extern struct workqueue_struct *qedf_io_wq;
struct qedf_rport {
spinlock_t rport_lock;
#define QEDF_RPORT_SESSION_READY 1
#define QEDF_RPORT_UPLOADING_CONNECTION 2
unsigned long flags;
unsigned long retry_delay_timestamp;
struct fc_rport *rport;
struct fc_rport_priv *rdata;
struct qedf_ctx *qedf;
u32 handle; /* Handle from qed */
u32 fw_cid; /* fw_cid from qed */
void __iomem *p_doorbell;
/* Send queue management */
atomic_t free_sqes;
atomic_t num_active_ios;
struct fcoe_wqe *sq;
dma_addr_t sq_dma;
u16 sq_prod_idx;
u16 fw_sq_prod_idx;
u16 sq_con_idx;
u32 sq_mem_size;
void *sq_pbl;
dma_addr_t sq_pbl_dma;
u32 sq_pbl_size;
u32 sid;
#define QEDF_RPORT_TYPE_DISK 1
#define QEDF_RPORT_TYPE_TAPE 2
uint dev_type; /* Disk or tape */
struct list_head peers;
};
/* Used to contain LL2 skb's in ll2_skb_list */
struct qedf_skb_work {
struct work_struct work;
struct sk_buff *skb;
struct qedf_ctx *qedf;
};
struct qedf_fastpath {
#define QEDF_SB_ID_NULL 0xffff
u16 sb_id;
struct qed_sb_info *sb_info;
struct qedf_ctx *qedf;
/* Keep track of number of completions on this fastpath */
unsigned long completions;
uint32_t cq_num_entries;
};
/* Used to pass fastpath information needed to process CQEs */
struct qedf_io_work {
struct work_struct work;
struct fcoe_cqe cqe;
struct qedf_ctx *qedf;
struct fc_frame *fp;
};
struct qedf_glbl_q_params {
u64 hw_p_cq; /* Completion queue PBL */
u64 hw_p_rq; /* Request queue PBL */
u64 hw_p_cmdq; /* Command queue PBL */
};
struct global_queue {
struct fcoe_cqe *cq;
dma_addr_t cq_dma;
u32 cq_mem_size;
u32 cq_cons_idx; /* Completion queue consumer index */
u32 cq_prod_idx;
void *cq_pbl;
dma_addr_t cq_pbl_dma;
u32 cq_pbl_size;
};
/* I/O tracing entry */
#define QEDF_IO_TRACE_SIZE 2048
struct qedf_io_log {
#define QEDF_IO_TRACE_REQ 0
#define QEDF_IO_TRACE_RSP 1
uint8_t direction;
uint16_t task_id;
uint32_t port_id; /* Remote port fabric ID */
int lun;
char op; /* SCSI CDB */
uint8_t lba[4];
unsigned int bufflen; /* SCSI buffer length */
unsigned int sg_count; /* Number of SG elements */
int result; /* Result passed back to mid-layer */
unsigned long jiffies; /* Time stamp when I/O logged */
int refcount; /* Reference count for task id */
unsigned int req_cpu; /* CPU that the task is queued on */
unsigned int int_cpu; /* Interrupt CPU that the task is received on */
unsigned int rsp_cpu; /* CPU that task is returned on */
u8 sge_type; /* Did we take the slow, single or fast SGE path */
};
/* Number of entries in BDQ */
#define QEDF_BDQ_SIZE 256
#define QEDF_BDQ_BUF_SIZE 2072
/* DMA coherent buffers for BDQ */
struct qedf_bdq_buf {
void *buf_addr;
dma_addr_t buf_dma;
};
/* Main adapter struct */
struct qedf_ctx {
struct qedf_dbg_ctx dbg_ctx;
struct fcoe_ctlr ctlr;
struct fc_lport *lport;
u8 data_src_addr[ETH_ALEN];
#define QEDF_LINK_DOWN 0
#define QEDF_LINK_UP 1
atomic_t link_state;
#define QEDF_DCBX_PENDING 0
#define QEDF_DCBX_DONE 1
atomic_t dcbx;
uint16_t max_scsi_xid;
uint16_t max_els_xid;
#define QEDF_NULL_VLAN_ID -1
#define QEDF_FALLBACK_VLAN 1002
#define QEDF_DEFAULT_PRIO 3
int vlan_id;
uint vlan_hw_insert:1;
struct qed_dev *cdev;
struct qed_dev_fcoe_info dev_info;
struct qed_int_info int_info;
uint16_t last_command;
spinlock_t hba_lock;
struct pci_dev *pdev;
u64 wwnn;
u64 wwpn;
u8 __aligned(16) mac[ETH_ALEN];
struct list_head fcports;
atomic_t num_offloads;
unsigned int curr_conn_id;
struct workqueue_struct *ll2_recv_wq;
struct workqueue_struct *link_update_wq;
struct delayed_work link_update;
struct delayed_work link_recovery;
struct completion flogi_compl;
struct completion fipvlan_compl;
/*
* Used to tell if we're in the window where we are waiting for
* the link to come back up before informing fcoe that the link is
* down.
*/
atomic_t link_down_tmo_valid;
#define QEDF_TIMER_INTERVAL (1 * HZ)
struct timer_list timer; /* One-second bookkeeping timer */
#define QEDF_DRAIN_ACTIVE 1
#define QEDF_LL2_STARTED 2
#define QEDF_UNLOADING 3
#define QEDF_GRCDUMP_CAPTURE 4
#define QEDF_IN_RECOVERY 5
#define QEDF_DBG_STOP_IO 6
unsigned long flags; /* Miscellaneous state flags */
int fipvlan_retries;
u8 num_queues;
struct global_queue **global_queues;
/* Pointer to array of queue structures */
struct qedf_glbl_q_params *p_cpuq;
/* Physical address of array of queue structures */
dma_addr_t hw_p_cpuq;
struct qedf_bdq_buf bdq[QEDF_BDQ_SIZE];
void *bdq_pbl;
dma_addr_t bdq_pbl_dma;
size_t bdq_pbl_mem_size;
void *bdq_pbl_list;
dma_addr_t bdq_pbl_list_dma;
u8 bdq_pbl_list_num_entries;
void __iomem *bdq_primary_prod;
void __iomem *bdq_secondary_prod;
uint16_t bdq_prod_idx;
/* Structure for holding all the fastpath for this qedf_ctx */
struct qedf_fastpath *fp_array;
struct qed_fcoe_tid tasks;
struct qedf_cmd_mgr *cmd_mgr;
/* Holds the PF parameters we pass to qed to start the FCoE function */
struct qed_pf_params pf_params;
/* Used to time middle path ELS and TM commands */
struct workqueue_struct *timer_work_queue;
#define QEDF_IO_WORK_MIN 64
mempool_t *io_mempool;
struct workqueue_struct *dpc_wq;
u32 slow_sge_ios;
u32 fast_sge_ios;
u32 single_sge_ios;
uint8_t *grcdump;
uint32_t grcdump_size;
struct qedf_io_log io_trace_buf[QEDF_IO_TRACE_SIZE];
spinlock_t io_trace_lock;
uint16_t io_trace_idx;
bool stop_io_on_error;
u32 flogi_cnt;
u32 flogi_failed;
/* Used for fc statistics */
u64 input_requests;
u64 output_requests;
u64 control_requests;
u64 packet_aborts;
u64 alloc_failures;
};
struct io_bdt {
struct qedf_ioreq *io_req;
struct fcoe_sge *bd_tbl;
dma_addr_t bd_tbl_dma;
u16 bd_valid;
};
struct qedf_cmd_mgr {
struct qedf_ctx *qedf;
u16 idx;
struct io_bdt **io_bdt_pool;
#define FCOE_PARAMS_NUM_TASKS 4096
struct qedf_ioreq cmds[FCOE_PARAMS_NUM_TASKS];
spinlock_t lock;
atomic_t free_list_cnt;
};
/* Stolen from qed_cxt_api.h and adapted for qed_fcoe_info
* Usage:
*
* void *ptr;
* ptr = qedf_get_task_mem(&qedf->tasks, 128);
*/
static inline void *qedf_get_task_mem(struct qed_fcoe_tid *info, u32 tid)
{
return (void *)(info->blocks[tid / info->num_tids_per_block] +
(tid % info->num_tids_per_block) * info->size);
}
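/*
 * Worked example with illustrative numbers: if info->size = 128 and
 * info->num_tids_per_block = 32, then tid 70 resolves to
 * blocks[70 / 32] + (70 % 32) * 128 = blocks[2] + 768.
 */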
static inline void qedf_stop_all_io(struct qedf_ctx *qedf)
{
set_bit(QEDF_DBG_STOP_IO, &qedf->flags);
}
/*
* Externs
*/
#define QEDF_DEFAULT_LOG_MASK 0x3CFB6
extern const struct qed_fcoe_ops *qed_ops;
extern uint qedf_dump_frames;
extern uint qedf_io_tracing;
extern uint qedf_stop_io_on_error;
extern uint qedf_link_down_tmo;
#define QEDF_RETRY_DELAY_MAX 20 /* 2 seconds */
extern bool qedf_retry_delay;
extern uint qedf_debug;
extern struct qedf_cmd_mgr *qedf_cmd_mgr_alloc(struct qedf_ctx *qedf);
extern void qedf_cmd_mgr_free(struct qedf_cmd_mgr *cmgr);
extern int qedf_queuecommand(struct Scsi_Host *host,
struct scsi_cmnd *sc_cmd);
extern void qedf_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb);
extern void qedf_update_src_mac(struct fc_lport *lport, u8 *addr);
extern u8 *qedf_get_src_mac(struct fc_lport *lport);
extern void qedf_fip_recv(struct qedf_ctx *qedf, struct sk_buff *skb);
extern void qedf_fcoe_send_vlan_req(struct qedf_ctx *qedf);
extern void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
struct qedf_ioreq *io_req);
extern void qedf_process_warning_compl(struct qedf_ctx *qedf,
struct fcoe_cqe *cqe, struct qedf_ioreq *io_req);
extern void qedf_process_error_detect(struct qedf_ctx *qedf,
struct fcoe_cqe *cqe, struct qedf_ioreq *io_req);
extern void qedf_flush_active_ios(struct qedf_rport *fcport, int lun);
extern void qedf_release_cmd(struct kref *ref);
extern int qedf_initiate_abts(struct qedf_ioreq *io_req,
bool return_scsi_cmd_on_abts);
extern void qedf_process_abts_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
struct qedf_ioreq *io_req);
extern struct qedf_ioreq *qedf_alloc_cmd(struct qedf_rport *fcport,
u8 cmd_type);
extern struct device_attribute *qedf_host_attrs[];
extern void qedf_cmd_timer_set(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
unsigned int timer_msec);
extern int qedf_init_mp_req(struct qedf_ioreq *io_req);
extern void qedf_init_mp_task(struct qedf_ioreq *io_req,
struct fcoe_task_context *task_ctx);
extern void qedf_add_to_sq(struct qedf_rport *fcport, u16 xid,
u32 ptu_invalidate, enum fcoe_task_type req_type, u32 offset);
extern void qedf_ring_doorbell(struct qedf_rport *fcport);
extern void qedf_process_els_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
struct qedf_ioreq *els_req);
extern int qedf_send_rrq(struct qedf_ioreq *aborted_io_req);
extern int qedf_send_adisc(struct qedf_rport *fcport, struct fc_frame *fp);
extern int qedf_initiate_cleanup(struct qedf_ioreq *io_req,
bool return_scsi_cmd_on_abts);
extern void qedf_process_cleanup_compl(struct qedf_ctx *qedf,
struct fcoe_cqe *cqe, struct qedf_ioreq *io_req);
extern int qedf_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags);
extern void qedf_process_tmf_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
struct qedf_ioreq *io_req);
extern void qedf_process_cqe(struct qedf_ctx *qedf, struct fcoe_cqe *cqe);
extern void qedf_scsi_done(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
int result);
extern void qedf_set_vlan_id(struct qedf_ctx *qedf, int vlan_id);
extern void qedf_create_sysfs_ctx_attr(struct qedf_ctx *qedf);
extern void qedf_remove_sysfs_ctx_attr(struct qedf_ctx *qedf);
extern void qedf_capture_grc_dump(struct qedf_ctx *qedf);
extern void qedf_wait_for_upload(struct qedf_ctx *qedf);
extern void qedf_process_unsol_compl(struct qedf_ctx *qedf, uint16_t que_idx,
struct fcoe_cqe *cqe);
extern void qedf_restart_rport(struct qedf_rport *fcport);
extern int qedf_send_rec(struct qedf_ioreq *orig_io_req);
extern int qedf_post_io_req(struct qedf_rport *fcport,
struct qedf_ioreq *io_req);
extern void qedf_process_seq_cleanup_compl(struct qedf_ctx *qedf,
struct fcoe_cqe *cqe, struct qedf_ioreq *io_req);
extern int qedf_send_flogi(struct qedf_ctx *qedf);
extern void qedf_fp_io_handler(struct work_struct *work);
#define FCOE_WORD_TO_BYTE 4
#define QEDF_MAX_TASK_NUM 0xFFFF
struct fip_vlan {
struct ethhdr eth;
struct fip_header fip;
struct {
struct fip_mac_desc mac;
struct fip_wwn_desc wwnn;
} desc;
};
/* SQ/CQ Sizes */
#define GBL_RSVD_TASKS 16
#define NUM_TASKS_PER_CONNECTION 1024
#define NUM_RW_TASKS_PER_CONNECTION 512
#define FCOE_PARAMS_CQ_NUM_ENTRIES FCOE_PARAMS_NUM_TASKS
#define FCOE_PARAMS_CMDQ_NUM_ENTRIES FCOE_PARAMS_NUM_TASKS
#define SQ_NUM_ENTRIES NUM_TASKS_PER_CONNECTION
#define QEDF_FCOE_PARAMS_GL_RQ_PI 0
#define QEDF_FCOE_PARAMS_GL_CMD_PI 1
#define QEDF_READ (1 << 1)
#define QEDF_WRITE (1 << 0)
#define MAX_FIBRE_LUNS 0xffffffff
#define QEDF_MAX_NUM_CQS 8
/*
* PCI function probe defines
*/
/* Probe/remove called during normal PCI probe */
#define QEDF_MODE_NORMAL 0
/* Probe/remove called from qed error recovery */
#define QEDF_MODE_RECOVERY 1
#define SUPPORTED_25000baseKR_Full (1<<27)
#define SUPPORTED_50000baseKR2_Full (1<<28)
#define SUPPORTED_100000baseKR4_Full (1<<29)
#define SUPPORTED_100000baseCR4_Full (1<<30)
#endif

drivers/scsi/qedf/qedf_attr.c (new file, 165 lines)
@@ -0,0 +1,165 @@
/*
* QLogic FCoE Offload Driver
* Copyright (c) 2016 Cavium Inc.
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
* this source tree.
*/
#include "qedf.h"
static ssize_t
qedf_fcoe_mac_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct fc_lport *lport = shost_priv(class_to_shost(dev));
u32 port_id;
u8 lport_src_id[3];
u8 fcoe_mac[6];
port_id = fc_host_port_id(lport->host);
lport_src_id[2] = (port_id & 0x000000FF);
lport_src_id[1] = (port_id & 0x0000FF00) >> 8;
lport_src_id[0] = (port_id & 0x00FF0000) >> 16;
fc_fcoe_set_mac(fcoe_mac, lport_src_id);
return scnprintf(buf, PAGE_SIZE, "%pM\n", fcoe_mac);
}
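/*
 * For reference, fc_fcoe_set_mac() composes the MAC from the default
 * FC-MAP OUI (0e:fc:00) followed by the 3-byte port_id, so a port_id of
 * 0x0100e1 reads back as 0e:fc:00:01:00:e1.
 */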
static DEVICE_ATTR(fcoe_mac, S_IRUGO, qedf_fcoe_mac_show, NULL);
struct device_attribute *qedf_host_attrs[] = {
&dev_attr_fcoe_mac,
NULL,
};
extern const struct qed_fcoe_ops *qed_ops;
inline bool qedf_is_vport(struct qedf_ctx *qedf)
{
return qedf->lport->vport != NULL;
}
/* Get base qedf for physical port from vport */
static struct qedf_ctx *qedf_get_base_qedf(struct qedf_ctx *qedf)
{
struct fc_lport *lport;
struct fc_lport *base_lport;
if (!(qedf_is_vport(qedf)))
return NULL;
lport = qedf->lport;
base_lport = shost_priv(vport_to_shost(lport->vport));
return (struct qedf_ctx *)(lport_priv(base_lport));
}
void qedf_capture_grc_dump(struct qedf_ctx *qedf)
{
struct qedf_ctx *base_qedf;
/* Make sure we use the base qedf to take the GRC dump */
if (qedf_is_vport(qedf))
base_qedf = qedf_get_base_qedf(qedf);
else
base_qedf = qedf;
if (test_bit(QEDF_GRCDUMP_CAPTURE, &base_qedf->flags)) {
QEDF_INFO(&(base_qedf->dbg_ctx), QEDF_LOG_INFO,
"GRC Dump already captured.\n");
return;
}
qedf_get_grc_dump(base_qedf->cdev, qed_ops->common,
&base_qedf->grcdump, &base_qedf->grcdump_size);
QEDF_ERR(&(base_qedf->dbg_ctx), "GRC Dump captured.\n");
set_bit(QEDF_GRCDUMP_CAPTURE, &base_qedf->flags);
qedf_uevent_emit(base_qedf->lport->host, QEDF_UEVENT_CODE_GRCDUMP,
NULL);
}
static ssize_t
qedf_sysfs_read_grcdump(struct file *filep, struct kobject *kobj,
struct bin_attribute *ba, char *buf, loff_t off,
size_t count)
{
ssize_t ret = 0;
struct fc_lport *lport = shost_priv(dev_to_shost(container_of(kobj,
struct device, kobj)));
struct qedf_ctx *qedf = lport_priv(lport);
if (test_bit(QEDF_GRCDUMP_CAPTURE, &qedf->flags)) {
ret = memory_read_from_buffer(buf, count, &off,
qedf->grcdump, qedf->grcdump_size);
} else {
QEDF_ERR(&(qedf->dbg_ctx), "GRC Dump not captured!\n");
}
return ret;
}
static ssize_t
qedf_sysfs_write_grcdump(struct file *filep, struct kobject *kobj,
struct bin_attribute *ba, char *buf, loff_t off,
size_t count)
{
struct fc_lport *lport = NULL;
struct qedf_ctx *qedf = NULL;
long reading;
int ret = 0;
if (off != 0)
return ret;
lport = shost_priv(dev_to_shost(container_of(kobj,
struct device, kobj)));
qedf = lport_priv(lport);
/* Expect a single-digit value; terminate after it for kstrtol() */
buf[1] = '\0';
ret = kstrtol(buf, 10, &reading);
if (ret) {
QEDF_ERR(&(qedf->dbg_ctx), "Invalid input, err(%d)\n", ret);
return ret;
}
switch (reading) {
case 0:
memset(qedf->grcdump, 0, qedf->grcdump_size);
clear_bit(QEDF_GRCDUMP_CAPTURE, &qedf->flags);
break;
case 1:
qedf_capture_grc_dump(qedf);
break;
}
return count;
}
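/*
 * Usage sketch from userspace (the sysfs path is illustrative; it depends
 * on the host number and sysfs layout):
 *
 *	echo 1 > /sys/class/scsi_host/hostN/device/grcdump   # capture
 *	cat /sys/class/scsi_host/hostN/device/grcdump > dump # read it out
 *	echo 0 > /sys/class/scsi_host/hostN/device/grcdump   # clear
 */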
static struct bin_attribute sysfs_grcdump_attr = {
.attr = {
.name = "grcdump",
.mode = S_IRUSR | S_IWUSR,
},
.size = 0,
.read = qedf_sysfs_read_grcdump,
.write = qedf_sysfs_write_grcdump,
};
static struct sysfs_bin_attrs bin_file_entries[] = {
{"grcdump", &sysfs_grcdump_attr},
{NULL},
};
void qedf_create_sysfs_ctx_attr(struct qedf_ctx *qedf)
{
qedf_create_sysfs_attr(qedf->lport->host, bin_file_entries);
}
void qedf_remove_sysfs_ctx_attr(struct qedf_ctx *qedf)
{
qedf_remove_sysfs_attr(qedf->lport->host, bin_file_entries);
}

drivers/scsi/qedf/qedf_dbg.c (new file, 195 lines)
@@ -0,0 +1,195 @@
/*
* QLogic FCoE Offload Driver
* Copyright (c) 2016 Cavium Inc.
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
* this source tree.
*/
#include "qedf_dbg.h"
#include <linux/vmalloc.h>
void
qedf_dbg_err(struct qedf_dbg_ctx *qedf, const char *func, u32 line,
const char *fmt, ...)
{
va_list va;
struct va_format vaf;
char nfunc[32];
/* Bounded, NUL-terminated copy of the function name */
strscpy(nfunc, func, sizeof(nfunc));
va_start(va, fmt);
vaf.fmt = fmt;
vaf.va = &va;
if (likely(qedf) && likely(qedf->pdev))
pr_err("[%s]:[%s:%d]:%d: %pV", dev_name(&(qedf->pdev->dev)),
nfunc, line, qedf->host_no, &vaf);
else
pr_err("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf);
va_end(va);
}
void
qedf_dbg_warn(struct qedf_dbg_ctx *qedf, const char *func, u32 line,
const char *fmt, ...)
{
va_list va;
struct va_format vaf;
char nfunc[32];
strscpy(nfunc, func, sizeof(nfunc));
va_start(va, fmt);
vaf.fmt = fmt;
vaf.va = &va;
if (!(qedf_debug & QEDF_LOG_WARN))
goto ret;
if (likely(qedf) && likely(qedf->pdev))
pr_warn("[%s]:[%s:%d]:%d: %pV", dev_name(&(qedf->pdev->dev)),
nfunc, line, qedf->host_no, &vaf);
else
pr_warn("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf);
ret:
va_end(va);
}
void
qedf_dbg_notice(struct qedf_dbg_ctx *qedf, const char *func, u32 line,
const char *fmt, ...)
{
va_list va;
struct va_format vaf;
char nfunc[32];
strscpy(nfunc, func, sizeof(nfunc));
va_start(va, fmt);
vaf.fmt = fmt;
vaf.va = &va;
if (!(qedf_debug & QEDF_LOG_NOTICE))
goto ret;
if (likely(qedf) && likely(qedf->pdev))
pr_notice("[%s]:[%s:%d]:%d: %pV",
dev_name(&(qedf->pdev->dev)), nfunc, line,
qedf->host_no, &vaf);
else
pr_notice("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf);
ret:
va_end(va);
}
void
qedf_dbg_info(struct qedf_dbg_ctx *qedf, const char *func, u32 line,
u32 level, const char *fmt, ...)
{
va_list va;
struct va_format vaf;
char nfunc[32];
strscpy(nfunc, func, sizeof(nfunc));
va_start(va, fmt);
vaf.fmt = fmt;
vaf.va = &va;
if (!(qedf_debug & level))
goto ret;
if (likely(qedf) && likely(qedf->pdev))
pr_info("[%s]:[%s:%d]:%d: %pV", dev_name(&(qedf->pdev->dev)),
nfunc, line, qedf->host_no, &vaf);
else
pr_info("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf);
ret:
va_end(va);
}
int
qedf_alloc_grc_dump_buf(u8 **buf, uint32_t len)
{
*buf = vzalloc(len);
if (!(*buf))
return -ENOMEM;
return 0;
}
void
qedf_free_grc_dump_buf(uint8_t **buf)
{
vfree(*buf);
*buf = NULL;
}
int
qedf_get_grc_dump(struct qed_dev *cdev, const struct qed_common_ops *common,
u8 **buf, uint32_t *grcsize)
{
if (!*buf)
return -EINVAL;
return common->dbg_grc(cdev, *buf, grcsize);
}
void
qedf_uevent_emit(struct Scsi_Host *shost, u32 code, char *msg)
{
char event_string[40];
char *envp[] = {event_string, NULL};
memset(event_string, 0, sizeof(event_string));
switch (code) {
case QEDF_UEVENT_CODE_GRCDUMP:
/* Bound the copy to the buffer and keep it NUL-terminated */
if (msg)
strscpy(event_string, msg, sizeof(event_string));
else
scnprintf(event_string, sizeof(event_string),
"GRCDUMP=%u", shost->host_no);
break;
default:
/* do nothing */
break;
}
kobject_uevent_env(&shost->shost_gendev.kobj, KOBJ_CHANGE, envp);
}
int
qedf_create_sysfs_attr(struct Scsi_Host *shost, struct sysfs_bin_attrs *iter)
{
int ret = 0;
for (; iter->name; iter++) {
ret = sysfs_create_bin_file(&shost->shost_gendev.kobj,
iter->attr);
if (ret)
pr_err("Unable to create sysfs %s attr, err(%d).\n",
iter->name, ret);
}
return ret;
}
void
qedf_remove_sysfs_attr(struct Scsi_Host *shost, struct sysfs_bin_attrs *iter)
{
for (; iter->name; iter++)
sysfs_remove_bin_file(&shost->shost_gendev.kobj, iter->attr);
}

drivers/scsi/qedf/qedf_dbg.h (new file, 154 lines)
@@ -0,0 +1,154 @@
/*
* QLogic FCoE Offload Driver
* Copyright (c) 2016 Cavium Inc.
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
* this source tree.
*/
#ifndef _QEDF_DBG_H_
#define _QEDF_DBG_H_
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/compiler.h>
#include <linux/string.h>
#include <linux/version.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <scsi/scsi_transport.h>
#include <linux/fs.h>
#include <linux/qed/common_hsi.h>
#include <linux/qed/qed_if.h>
extern uint qedf_debug;
/* Debug print level definitions */
#define QEDF_LOG_DEFAULT 0x1 /* Set default logging mask */
#define QEDF_LOG_INFO 0x2 /*
* Informational logs,
* MAC address, WWPN, WWNN
*/
#define QEDF_LOG_DISC 0x4 /* Init, discovery, rport */
#define QEDF_LOG_LL2 0x8 /* LL2, VLAN logs */
#define QEDF_LOG_CONN 0x10 /* Connection setup, cleanup */
#define QEDF_LOG_EVT 0x20 /* Events, link, mtu */
#define QEDF_LOG_TIMER 0x40 /* Timer events */
#define QEDF_LOG_MP_REQ 0x80 /* Middle Path (MP) logs */
#define QEDF_LOG_SCSI_TM 0x100 /* SCSI Aborts, Task Mgmt */
#define QEDF_LOG_UNSOL 0x200 /* unsolicited event logs */
#define QEDF_LOG_IO 0x400 /* scsi cmd, completion */
#define QEDF_LOG_MQ 0x800 /* Multi Queue logs */
#define QEDF_LOG_BSG 0x1000 /* BSG logs */
#define QEDF_LOG_DEBUGFS 0x2000 /* debugFS logs */
#define QEDF_LOG_LPORT 0x4000 /* lport logs */
#define QEDF_LOG_ELS 0x8000 /* ELS logs */
#define QEDF_LOG_NPIV 0x10000 /* NPIV logs */
#define QEDF_LOG_SESS 0x20000 /* Connection setup, cleanup */
#define QEDF_LOG_TID 0x80000 /*
* FW TID context acquire/free
*/
#define QEDF_TRACK_TID 0x100000 /*
* Track TID state. To be
* enabled only at module load
* and not run-time.
*/
#define QEDF_TRACK_CMD_LIST 0x300000 /*
* Track active cmd list nodes,
* done with reference to TID,
* hence TRACK_TID also enabled.
*/
#define QEDF_LOG_NOTICE 0x40000000 /* Notice logs */
#define QEDF_LOG_WARN 0x80000000 /* Warning logs */
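/*
 * Example: to enable ELS and I/O logging together, OR the masks into
 * qedf_debug, e.g. qedf_debug = QEDF_LOG_ELS | QEDF_LOG_IO (0x8400).
 */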
/* Debug context structure */
struct qedf_dbg_ctx {
unsigned int host_no;
struct pci_dev *pdev;
#ifdef CONFIG_DEBUG_FS
struct dentry *bdf_dentry;
#endif
};
#define QEDF_ERR(pdev, fmt, ...) \
qedf_dbg_err(pdev, __func__, __LINE__, fmt, ## __VA_ARGS__)
#define QEDF_WARN(pdev, fmt, ...) \
qedf_dbg_warn(pdev, __func__, __LINE__, fmt, ## __VA_ARGS__)
#define QEDF_NOTICE(pdev, fmt, ...) \
qedf_dbg_notice(pdev, __func__, __LINE__, fmt, ## __VA_ARGS__)
#define QEDF_INFO(pdev, level, fmt, ...) \
qedf_dbg_info(pdev, __func__, __LINE__, level, fmt, \
## __VA_ARGS__)
extern void qedf_dbg_err(struct qedf_dbg_ctx *qedf, const char *func, u32 line,
const char *fmt, ...);
extern void qedf_dbg_warn(struct qedf_dbg_ctx *qedf, const char *func, u32 line,
const char *, ...);
extern void qedf_dbg_notice(struct qedf_dbg_ctx *qedf, const char *func,
u32 line, const char *, ...);
extern void qedf_dbg_info(struct qedf_dbg_ctx *qedf, const char *func, u32 line,
u32 info, const char *fmt, ...);
/* GRC Dump related defines */
struct Scsi_Host;
#define QEDF_UEVENT_CODE_GRCDUMP 0
struct sysfs_bin_attrs {
char *name;
struct bin_attribute *attr;
};
extern int qedf_alloc_grc_dump_buf(uint8_t **buf, uint32_t len);
extern void qedf_free_grc_dump_buf(uint8_t **buf);
extern int qedf_get_grc_dump(struct qed_dev *cdev,
const struct qed_common_ops *common, uint8_t **buf,
uint32_t *grcsize);
extern void qedf_uevent_emit(struct Scsi_Host *shost, u32 code, char *msg);
extern int qedf_create_sysfs_attr(struct Scsi_Host *shost,
struct sysfs_bin_attrs *iter);
extern void qedf_remove_sysfs_attr(struct Scsi_Host *shost,
struct sysfs_bin_attrs *iter);
#ifdef CONFIG_DEBUG_FS
/* DebugFS related code */
struct qedf_list_of_funcs {
char *oper_str;
ssize_t (*oper_func)(struct qedf_dbg_ctx *qedf);
};
struct qedf_debugfs_ops {
char *name;
struct qedf_list_of_funcs *qedf_funcs;
};
#define qedf_dbg_fileops(drv, ops) \
{ \
.owner = THIS_MODULE, \
.open = simple_open, \
.read = drv##_dbg_##ops##_cmd_read, \
.write = drv##_dbg_##ops##_cmd_write \
}
/* Used for debugfs sequential files */
#define qedf_dbg_fileops_seq(drv, ops) \
{ \
.owner = THIS_MODULE, \
.open = drv##_dbg_##ops##_open, \
.read = seq_read, \
.llseek = seq_lseek, \
.release = single_release, \
}
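/*
 * For reference, qedf_dbg_fileops(qedf, debug) expands to:
 *
 *	{ .owner = THIS_MODULE, .open = simple_open,
 *	  .read = qedf_dbg_debug_cmd_read,
 *	  .write = qedf_dbg_debug_cmd_write }
 */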
extern void qedf_dbg_host_init(struct qedf_dbg_ctx *qedf,
struct qedf_debugfs_ops *dops,
struct file_operations *fops);
extern void qedf_dbg_host_exit(struct qedf_dbg_ctx *qedf);
extern void qedf_dbg_init(char *drv_name);
extern void qedf_dbg_exit(void);
#endif /* CONFIG_DEBUG_FS */
#endif /* _QEDF_DBG_H_ */

drivers/scsi/qedf/qedf_debugfs.c (new file, 460 lines)
@@ -0,0 +1,460 @@
/*
* QLogic FCoE Offload Driver
* Copyright (c) 2016 QLogic Corporation
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
* this source tree.
*/
#ifdef CONFIG_DEBUG_FS
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/module.h>
#include "qedf.h"
#include "qedf_dbg.h"
static struct dentry *qedf_dbg_root;
/**
* qedf_dbg_host_init - set up the debugfs node for a host
* @qedf: debug context of the host that is starting up
* @dops: table of debugfs operations for the host's nodes
* @fops: table of file operations, parallel to @dops
**/
void
qedf_dbg_host_init(struct qedf_dbg_ctx *qedf,
struct qedf_debugfs_ops *dops,
struct file_operations *fops)
{
char host_dirname[32];
struct dentry *file_dentry = NULL;
QEDF_INFO(qedf, QEDF_LOG_DEBUGFS, "Creating debugfs host node\n");
/* create pf dir */
sprintf(host_dirname, "host%u", qedf->host_no);
qedf->bdf_dentry = debugfs_create_dir(host_dirname, qedf_dbg_root);
if (!qedf->bdf_dentry)
return;
/* create debugfs files */
while (dops->name) {
file_dentry = debugfs_create_file(dops->name, 0600,
qedf->bdf_dentry, qedf,
fops);
if (!file_dentry) {
QEDF_INFO(qedf, QEDF_LOG_DEBUGFS,
"Debugfs entry %s creation failed\n",
dops->name);
debugfs_remove_recursive(qedf->bdf_dentry);
return;
}
dops++;
fops++;
}
}
/**
* qedf_dbg_host_exit - clear out a host's debugfs entries
* @qedf: debug context of the host that is stopping
**/
void
qedf_dbg_host_exit(struct qedf_dbg_ctx *qedf)
{
QEDF_INFO(qedf, QEDF_LOG_DEBUGFS, "Destroying debugfs host "
"entry\n");
/* remove debugfs entries of this PF */
debugfs_remove_recursive(qedf->bdf_dentry);
qedf->bdf_dentry = NULL;
}
/**
* qedf_dbg_init - start up debugfs for the driver
* @drv_name: name of the debugfs directory to create
**/
void
qedf_dbg_init(char *drv_name)
{
QEDF_INFO(NULL, QEDF_LOG_DEBUGFS, "Creating debugfs root node\n");
/* create qed dir in root of debugfs. NULL means debugfs root */
qedf_dbg_root = debugfs_create_dir(drv_name, NULL);
if (!qedf_dbg_root)
QEDF_INFO(NULL, QEDF_LOG_DEBUGFS, "Init of debugfs "
"failed\n");
}
/**
* qedf_dbg_exit - clean out the driver's debugfs entries
**/
void
qedf_dbg_exit(void)
{
QEDF_INFO(NULL, QEDF_LOG_DEBUGFS, "Destroying debugfs root "
"entry\n");
/* remove qed dir in root of debugfs */
debugfs_remove_recursive(qedf_dbg_root);
qedf_dbg_root = NULL;
}
struct qedf_debugfs_ops qedf_debugfs_ops[] = {
{ "fp_int", NULL },
{ "io_trace", NULL },
{ "debug", NULL },
{ "stop_io_on_error", NULL},
{ "driver_stats", NULL},
{ "clear_stats", NULL},
{ "offload_stats", NULL},
/* This must be last */
{ NULL, NULL }
};
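/*
 * Note: qedf_dbg_host_init() walks this table and qedf_dbg_fops in
 * lockstep (dops++/fops++), so both arrays must list the nodes in the
 * same order, with a NULL-name entry as the terminator.
 */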
DECLARE_PER_CPU(struct qedf_percpu_iothread_s, qedf_percpu_iothreads);
static ssize_t
qedf_dbg_fp_int_cmd_read(struct file *filp, char __user *buffer, size_t count,
loff_t *ppos)
{
ssize_t ret;
size_t cnt = 0;
char *buf;
int id;
struct qedf_fastpath *fp = NULL;
struct qedf_dbg_ctx *qedf_dbg =
(struct qedf_dbg_ctx *)filp->private_data;
struct qedf_ctx *qedf = container_of(qedf_dbg,
struct qedf_ctx, dbg_ctx);
QEDF_INFO(qedf_dbg, QEDF_LOG_DEBUGFS, "entered\n");
/*
 * Format into a kernel buffer first; calling sprintf() directly on the
 * raw user pointer is unsafe.
 */
buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
if (!buf)
return -ENOMEM;
cnt = scnprintf(buf, PAGE_SIZE, "\nFastpath I/O completions\n\n");
for (id = 0; id < qedf->num_queues; id++) {
fp = &(qedf->fp_array[id]);
if (fp->sb_id == QEDF_SB_ID_NULL)
continue;
cnt += scnprintf(buf + cnt, PAGE_SIZE - cnt, "#%d: %lu\n", id,
fp->completions);
}
ret = simple_read_from_buffer(buffer, count, ppos, buf, cnt);
kfree(buf);
return ret;
}
static ssize_t
qedf_dbg_fp_int_cmd_write(struct file *filp, const char __user *buffer,
size_t count, loff_t *ppos)
{
if (!count || *ppos)
return 0;
return count;
}
static ssize_t
qedf_dbg_debug_cmd_read(struct file *filp, char __user *buffer, size_t count,
loff_t *ppos)
{
int cnt;
char tmp[32];
struct qedf_dbg_ctx *qedf =
(struct qedf_dbg_ctx *)filp->private_data;
QEDF_INFO(qedf, QEDF_LOG_DEBUGFS, "entered\n");
/* Format into a small kernel buffer and copy it out safely */
cnt = scnprintf(tmp, sizeof(tmp), "debug mask = 0x%x\n", qedf_debug);
return simple_read_from_buffer(buffer, count, ppos, tmp, cnt);
}
static ssize_t
qedf_dbg_debug_cmd_write(struct file *filp, const char __user *buffer,
size_t count, loff_t *ppos)
{
uint32_t val;
void *kern_buf;
int rval;
struct qedf_dbg_ctx *qedf =
(struct qedf_dbg_ctx *)filp->private_data;
if (!count || *ppos)
return 0;
/* kstrtouint() requires a NUL-terminated string */
kern_buf = memdup_user_nul(buffer, count);
if (IS_ERR(kern_buf))
return PTR_ERR(kern_buf);
rval = kstrtouint(kern_buf, 10, &val);
kfree(kern_buf);
if (rval)
return rval;
if (val == 1)
qedf_debug = QEDF_DEFAULT_LOG_MASK;
else
qedf_debug = val;
QEDF_INFO(qedf, QEDF_LOG_DEBUGFS, "Setting debug=0x%x.\n", val);
return count;
}
static ssize_t
qedf_dbg_stop_io_on_error_cmd_read(struct file *filp, char __user *buffer,
size_t count, loff_t *ppos)
{
int cnt;
char tmp[8];
struct qedf_dbg_ctx *qedf_dbg =
(struct qedf_dbg_ctx *)filp->private_data;
struct qedf_ctx *qedf = container_of(qedf_dbg,
struct qedf_ctx, dbg_ctx);
QEDF_INFO(qedf_dbg, QEDF_LOG_DEBUGFS, "entered\n");
cnt = scnprintf(tmp, sizeof(tmp), "%s\n",
qedf->stop_io_on_error ? "true" : "false");
return simple_read_from_buffer(buffer, count, ppos, tmp, cnt);
}
static ssize_t
qedf_dbg_stop_io_on_error_cmd_write(struct file *filp,
const char __user *buffer, size_t count,
loff_t *ppos)
{
void *kern_buf;
struct qedf_dbg_ctx *qedf_dbg =
(struct qedf_dbg_ctx *)filp->private_data;
struct qedf_ctx *qedf = container_of(qedf_dbg, struct qedf_ctx,
dbg_ctx);
QEDF_INFO(qedf_dbg, QEDF_LOG_DEBUGFS, "entered\n");
if (!count || *ppos)
return 0;
/* Bound the copy to the user's count and guarantee NUL termination */
kern_buf = memdup_user_nul(buffer, min_t(size_t, count, 6));
if (IS_ERR(kern_buf))
return PTR_ERR(kern_buf);
if (strncmp(kern_buf, "false", 5) == 0)
qedf->stop_io_on_error = false;
else if (strncmp(kern_buf, "true", 4) == 0)
qedf->stop_io_on_error = true;
else if (strncmp(kern_buf, "now", 3) == 0)
/* Trigger from user to stop all I/O on this host */
set_bit(QEDF_DBG_STOP_IO, &qedf->flags);
kfree(kern_buf);
return count;
}
static int
qedf_io_trace_show(struct seq_file *s, void *unused)
{
int i, idx = 0;
struct qedf_ctx *qedf = s->private;
struct qedf_dbg_ctx *qedf_dbg = &qedf->dbg_ctx;
struct qedf_io_log *io_log;
unsigned long flags;
if (!qedf_io_tracing) {
seq_puts(s, "I/O tracing not enabled.\n");
goto out;
}
QEDF_INFO(qedf_dbg, QEDF_LOG_DEBUGFS, "entered\n");
spin_lock_irqsave(&qedf->io_trace_lock, flags);
idx = qedf->io_trace_idx;
for (i = 0; i < QEDF_IO_TRACE_SIZE; i++) {
io_log = &qedf->io_trace_buf[idx];
seq_printf(s, "%d:", io_log->direction);
seq_printf(s, "0x%x:", io_log->task_id);
seq_printf(s, "0x%06x:", io_log->port_id);
seq_printf(s, "%d:", io_log->lun);
seq_printf(s, "0x%02x:", io_log->op);
seq_printf(s, "0x%02x%02x%02x%02x:", io_log->lba[0],
io_log->lba[1], io_log->lba[2], io_log->lba[3]);
seq_printf(s, "%d:", io_log->bufflen);
seq_printf(s, "%d:", io_log->sg_count);
seq_printf(s, "0x%08x:", io_log->result);
seq_printf(s, "%lu:", io_log->jiffies);
seq_printf(s, "%d:", io_log->refcount);
seq_printf(s, "%d:", io_log->req_cpu);
seq_printf(s, "%d:", io_log->int_cpu);
seq_printf(s, "%d:", io_log->rsp_cpu);
seq_printf(s, "%d\n", io_log->sge_type);
idx++;
if (idx == QEDF_IO_TRACE_SIZE)
idx = 0;
}
spin_unlock_irqrestore(&qedf->io_trace_lock, flags);
out:
return 0;
}
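/*
 * Each trace line above is colon-separated:
 * direction:task_id:port_id:lun:op:lba:bufflen:sg_count:result:
 * jiffies:refcount:req_cpu:int_cpu:rsp_cpu:sge_type
 */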
static int
qedf_dbg_io_trace_open(struct inode *inode, struct file *file)
{
struct qedf_dbg_ctx *qedf_dbg = inode->i_private;
struct qedf_ctx *qedf = container_of(qedf_dbg,
struct qedf_ctx, dbg_ctx);
return single_open(file, qedf_io_trace_show, qedf);
}
static int
qedf_driver_stats_show(struct seq_file *s, void *unused)
{
struct qedf_ctx *qedf = s->private;
struct qedf_rport *fcport;
struct fc_rport_priv *rdata;
seq_printf(s, "cmg_mgr free io_reqs: %d\n",
atomic_read(&qedf->cmd_mgr->free_list_cnt));
seq_printf(s, "slow SGEs: %d\n", qedf->slow_sge_ios);
seq_printf(s, "single SGEs: %d\n", qedf->single_sge_ios);
seq_printf(s, "fast SGEs: %d\n\n", qedf->fast_sge_ios);
seq_puts(s, "Offloaded ports:\n\n");
rcu_read_lock();
list_for_each_entry_rcu(fcport, &qedf->fcports, peers) {
rdata = fcport->rdata;
if (rdata == NULL)
continue;
seq_printf(s, "%06x: free_sqes: %d, num_active_ios: %d\n",
rdata->ids.port_id, atomic_read(&fcport->free_sqes),
atomic_read(&fcport->num_active_ios));
}
rcu_read_unlock();
return 0;
}
static int
qedf_dbg_driver_stats_open(struct inode *inode, struct file *file)
{
struct qedf_dbg_ctx *qedf_dbg = inode->i_private;
struct qedf_ctx *qedf = container_of(qedf_dbg,
struct qedf_ctx, dbg_ctx);
return single_open(file, qedf_driver_stats_show, qedf);
}
static ssize_t
qedf_dbg_clear_stats_cmd_read(struct file *filp, char __user *buffer,
size_t count, loff_t *ppos)
{
/* Read stub: this node is write-only, so always report EOF */
return 0;
}
static ssize_t
qedf_dbg_clear_stats_cmd_write(struct file *filp,
const char __user *buffer, size_t count,
loff_t *ppos)
{
struct qedf_dbg_ctx *qedf_dbg =
(struct qedf_dbg_ctx *)filp->private_data;
struct qedf_ctx *qedf = container_of(qedf_dbg, struct qedf_ctx,
dbg_ctx);
QEDF_INFO(qedf_dbg, QEDF_LOG_DEBUGFS, "Clearing stat counters.\n");
if (!count || *ppos)
return 0;
/* Clear stat counters exposed by 'stats' node */
qedf->slow_sge_ios = 0;
qedf->single_sge_ios = 0;
qedf->fast_sge_ios = 0;
return count;
}
static int
qedf_offload_stats_show(struct seq_file *s, void *unused)
{
struct qedf_ctx *qedf = s->private;
struct qed_fcoe_stats *fw_fcoe_stats;
fw_fcoe_stats = kmalloc(sizeof(struct qed_fcoe_stats), GFP_KERNEL);
if (!fw_fcoe_stats) {
QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate memory for "
"fw_fcoe_stats.\n");
goto out;
}
/* Query firmware for offload stats */
qed_ops->get_stats(qedf->cdev, fw_fcoe_stats);
seq_printf(s, "fcoe_rx_byte_cnt=%llu\n"
"fcoe_rx_data_pkt_cnt=%llu\n"
"fcoe_rx_xfer_pkt_cnt=%llu\n"
"fcoe_rx_other_pkt_cnt=%llu\n"
"fcoe_silent_drop_pkt_cmdq_full_cnt=%u\n"
"fcoe_silent_drop_pkt_crc_error_cnt=%u\n"
"fcoe_silent_drop_pkt_task_invalid_cnt=%u\n"
"fcoe_silent_drop_total_pkt_cnt=%u\n"
"fcoe_silent_drop_pkt_rq_full_cnt=%u\n"
"fcoe_tx_byte_cnt=%llu\n"
"fcoe_tx_data_pkt_cnt=%llu\n"
"fcoe_tx_xfer_pkt_cnt=%llu\n"
"fcoe_tx_other_pkt_cnt=%llu\n",
fw_fcoe_stats->fcoe_rx_byte_cnt,
fw_fcoe_stats->fcoe_rx_data_pkt_cnt,
fw_fcoe_stats->fcoe_rx_xfer_pkt_cnt,
fw_fcoe_stats->fcoe_rx_other_pkt_cnt,
fw_fcoe_stats->fcoe_silent_drop_pkt_cmdq_full_cnt,
fw_fcoe_stats->fcoe_silent_drop_pkt_crc_error_cnt,
fw_fcoe_stats->fcoe_silent_drop_pkt_task_invalid_cnt,
fw_fcoe_stats->fcoe_silent_drop_total_pkt_cnt,
fw_fcoe_stats->fcoe_silent_drop_pkt_rq_full_cnt,
fw_fcoe_stats->fcoe_tx_byte_cnt,
fw_fcoe_stats->fcoe_tx_data_pkt_cnt,
fw_fcoe_stats->fcoe_tx_xfer_pkt_cnt,
fw_fcoe_stats->fcoe_tx_other_pkt_cnt);
kfree(fw_fcoe_stats);
out:
return 0;
}
static int
qedf_dbg_offload_stats_open(struct inode *inode, struct file *file)
{
struct qedf_dbg_ctx *qedf_dbg = inode->i_private;
struct qedf_ctx *qedf = container_of(qedf_dbg,
struct qedf_ctx, dbg_ctx);
return single_open(file, qedf_offload_stats_show, qedf);
}
const struct file_operations qedf_dbg_fops[] = {
qedf_dbg_fileops(qedf, fp_int),
qedf_dbg_fileops_seq(qedf, io_trace),
qedf_dbg_fileops(qedf, debug),
qedf_dbg_fileops(qedf, stop_io_on_error),
qedf_dbg_fileops_seq(qedf, driver_stats),
qedf_dbg_fileops(qedf, clear_stats),
qedf_dbg_fileops_seq(qedf, offload_stats),
/* This must be last */
{ NULL, NULL },
};
#else /* CONFIG_DEBUG_FS */
void qedf_dbg_host_init(struct qedf_dbg_ctx *);
void qedf_dbg_host_exit(struct qedf_dbg_ctx *);
void qedf_dbg_init(char *);
void qedf_dbg_exit(void);
#endif /* CONFIG_DEBUG_FS */

drivers/scsi/qedf/qedf_els.c (new file, 949 lines)
@@ -0,0 +1,949 @@
/*
* QLogic FCoE Offload Driver
* Copyright (c) 2016 Cavium Inc.
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
* this source tree.
*/
#include "qedf.h"
/* It's assumed that the lock is held when calling this function. */
static int qedf_initiate_els(struct qedf_rport *fcport, unsigned int op,
void *data, uint32_t data_len,
void (*cb_func)(struct qedf_els_cb_arg *cb_arg),
struct qedf_els_cb_arg *cb_arg, uint32_t timer_msec)
{
struct qedf_ctx *qedf = fcport->qedf;
struct fc_lport *lport = qedf->lport;
struct qedf_ioreq *els_req;
struct qedf_mp_req *mp_req;
struct fc_frame_header *fc_hdr;
struct fcoe_task_context *task;
int rc = 0;
uint32_t did, sid;
uint16_t xid;
uint32_t start_time = jiffies / HZ;
uint32_t current_time;
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending ELS\n");
rc = fc_remote_port_chkready(fcport->rport);
if (rc) {
QEDF_ERR(&(qedf->dbg_ctx), "els 0x%x: rport not ready\n", op);
rc = -EAGAIN;
goto els_err;
}
if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
QEDF_ERR(&(qedf->dbg_ctx), "els 0x%x: link is not ready\n",
op);
rc = -EAGAIN;
goto els_err;
}
if (!(test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags))) {
QEDF_ERR(&(qedf->dbg_ctx), "els 0x%x: fcport not ready\n", op);
rc = -EINVAL;
goto els_err;
}
retry_els:
els_req = qedf_alloc_cmd(fcport, QEDF_ELS);
if (!els_req) {
current_time = jiffies / HZ;
if ((current_time - start_time) > 10) {
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
"els: Failed els 0x%x\n", op);
rc = -ENOMEM;
goto els_err;
}
/* Back off for 20 ms before retrying the allocation */
mdelay(20);
goto retry_els;
}
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "initiate_els els_req = "
"0x%p cb_arg = %p xid = %x\n", els_req, cb_arg,
els_req->xid);
els_req->sc_cmd = NULL;
els_req->cmd_type = QEDF_ELS;
els_req->fcport = fcport;
els_req->cb_func = cb_func;
cb_arg->io_req = els_req;
cb_arg->op = op;
els_req->cb_arg = cb_arg;
els_req->data_xfer_len = data_len;
/* Record which cpu this request is associated with */
els_req->cpu = smp_processor_id();
mp_req = (struct qedf_mp_req *)&(els_req->mp_req);
rc = qedf_init_mp_req(els_req);
if (rc) {
QEDF_ERR(&(qedf->dbg_ctx), "ELS MP request init failed\n");
kref_put(&els_req->refcount, qedf_release_cmd);
goto els_err;
} else {
rc = 0;
}
/* Fill ELS Payload */
if ((op >= ELS_LS_RJT) && (op <= ELS_AUTH_ELS)) {
memcpy(mp_req->req_buf, data, data_len);
} else {
QEDF_ERR(&(qedf->dbg_ctx), "Invalid ELS op 0x%x\n", op);
els_req->cb_func = NULL;
els_req->cb_arg = NULL;
kref_put(&els_req->refcount, qedf_release_cmd);
rc = -EINVAL;
}
if (rc)
goto els_err;
/* Fill FC header */
fc_hdr = &(mp_req->req_fc_hdr);
did = fcport->rdata->ids.port_id;
sid = fcport->sid;
__fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS_REQ, sid, did,
FC_TYPE_ELS, FC_FC_FIRST_SEQ | FC_FC_END_SEQ |
FC_FC_SEQ_INIT, 0);
/* Obtain exchange id */
xid = els_req->xid;
/* Initialize task context for this IO request */
task = qedf_get_task_mem(&qedf->tasks, xid);
qedf_init_mp_task(els_req, task);
/* Put timer on original I/O request */
if (timer_msec)
qedf_cmd_timer_set(qedf, els_req, timer_msec);
qedf_add_to_sq(fcport, xid, 0, FCOE_TASK_TYPE_MIDPATH, 0);
/* Ring doorbell */
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Ringing doorbell for ELS "
"req\n");
qedf_ring_doorbell(fcport);
els_err:
return rc;
}
void qedf_process_els_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
struct qedf_ioreq *els_req)
{
struct fcoe_task_context *task_ctx;
struct scsi_cmnd *sc_cmd;
uint16_t xid;
struct fcoe_cqe_midpath_info *mp_info;
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered with xid = 0x%x"
" cmd_type = %d.\n", els_req->xid, els_req->cmd_type);
/* Kill the ELS timer */
cancel_delayed_work(&els_req->timeout_work);
xid = els_req->xid;
task_ctx = qedf_get_task_mem(&qedf->tasks, xid);
sc_cmd = els_req->sc_cmd;
/* Get ELS response length from CQE */
mp_info = &cqe->cqe_info.midpath_info;
els_req->mp_req.resp_len = mp_info->data_placement_size;
/* Parse ELS response */
if ((els_req->cb_func) && (els_req->cb_arg)) {
els_req->cb_func(els_req->cb_arg);
els_req->cb_arg = NULL;
}
kref_put(&els_req->refcount, qedf_release_cmd);
}
static void qedf_rrq_compl(struct qedf_els_cb_arg *cb_arg)
{
struct qedf_ioreq *orig_io_req;
struct qedf_ioreq *rrq_req;
struct qedf_ctx *qedf;
int refcount;
rrq_req = cb_arg->io_req;
qedf = rrq_req->fcport->qedf;
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered.\n");
orig_io_req = cb_arg->aborted_io_req;
if (!orig_io_req)
goto out_free;
if (rrq_req->event != QEDF_IOREQ_EV_ELS_TMO &&
rrq_req->event != QEDF_IOREQ_EV_ELS_ERR_DETECT)
cancel_delayed_work_sync(&orig_io_req->timeout_work);
refcount = atomic_read(&orig_io_req->refcount.refcount);
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "rrq_compl: orig io = %p,"
" orig xid = 0x%x, rrq_xid = 0x%x, refcount=%d\n",
orig_io_req, orig_io_req->xid, rrq_req->xid, refcount);
/* This should return the aborted io_req to the command pool */
if (orig_io_req)
kref_put(&orig_io_req->refcount, qedf_release_cmd);
out_free:
kfree(cb_arg);
}
/* Assumes kref is already held by caller */
int qedf_send_rrq(struct qedf_ioreq *aborted_io_req)
{
struct fc_els_rrq rrq;
struct qedf_rport *fcport;
struct fc_lport *lport;
struct qedf_els_cb_arg *cb_arg = NULL;
struct qedf_ctx *qedf;
uint32_t sid;
uint32_t r_a_tov;
int rc;
if (!aborted_io_req) {
QEDF_ERR(NULL, "abort_io_req is NULL.\n");
return -EINVAL;
}
fcport = aborted_io_req->fcport;
/* Check that fcport is still offloaded */
if (!(test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags))) {
QEDF_ERR(NULL, "fcport is no longer offloaded.\n");
return -EINVAL;
}
if (!fcport->qedf) {
QEDF_ERR(NULL, "fcport->qedf is NULL.\n");
return -EINVAL;
}
qedf = fcport->qedf;
lport = qedf->lport;
sid = fcport->sid;
r_a_tov = lport->r_a_tov;
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending RRQ orig "
"io = %p, orig_xid = 0x%x\n", aborted_io_req,
aborted_io_req->xid);
memset(&rrq, 0, sizeof(rrq));
cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
if (!cb_arg) {
QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate cb_arg for "
"RRQ\n");
rc = -ENOMEM;
goto rrq_err;
}
cb_arg->aborted_io_req = aborted_io_req;
rrq.rrq_cmd = ELS_RRQ;
hton24(rrq.rrq_s_id, sid);
rrq.rrq_ox_id = htons(aborted_io_req->xid);
rrq.rrq_rx_id =
htons(aborted_io_req->task->tstorm_st_context.read_write.rx_id);
rc = qedf_initiate_els(fcport, ELS_RRQ, &rrq, sizeof(rrq),
qedf_rrq_compl, cb_arg, r_a_tov);
rrq_err:
if (rc) {
QEDF_ERR(&(qedf->dbg_ctx), "RRQ failed - release orig io "
"req 0x%x\n", aborted_io_req->xid);
kfree(cb_arg);
kref_put(&aborted_io_req->refcount, qedf_release_cmd);
}
return rc;
}
static void qedf_process_l2_frame_compl(struct qedf_rport *fcport,
struct fc_frame *fp,
u16 l2_oxid)
{
struct fc_lport *lport = fcport->qedf->lport;
struct fc_frame_header *fh;
u32 crc;
fh = (struct fc_frame_header *)fc_frame_header_get(fp);
/* Set the OXID we return to what libfc used */
if (l2_oxid != FC_XID_UNKNOWN)
fh->fh_ox_id = htons(l2_oxid);
/* Setup header fields */
fh->fh_r_ctl = FC_RCTL_ELS_REP;
fh->fh_type = FC_TYPE_ELS;
/* F_CTL: exchange responder, last sequence, end of sequence */
fh->fh_f_ctl[0] = 0x98;
hton24(fh->fh_d_id, lport->port_id);
hton24(fh->fh_s_id, fcport->rdata->ids.port_id);
fh->fh_rx_id = 0xffff;
/* Set frame attributes */
crc = fcoe_fc_crc(fp);
fc_frame_init(fp);
fr_dev(fp) = lport;
fr_sof(fp) = FC_SOF_I3;
fr_eof(fp) = FC_EOF_T;
fr_crc(fp) = cpu_to_le32(~crc);
/* Send completed request to libfc */
fc_exch_recv(lport, fp);
}
/*
* In instances where an ELS command times out we may need to restart the
* rport by logging out and then logging back in.
*/
void qedf_restart_rport(struct qedf_rport *fcport)
{
struct fc_lport *lport;
struct fc_rport_priv *rdata;
u32 port_id;
if (!fcport)
return;
rdata = fcport->rdata;
if (rdata) {
lport = fcport->qedf->lport;
port_id = rdata->ids.port_id;
QEDF_ERR(&(fcport->qedf->dbg_ctx),
"LOGO port_id=%x.\n", port_id);
fc_rport_logoff(rdata);
/* Recreate the rport and log back in */
rdata = fc_rport_create(lport, port_id);
if (rdata)
fc_rport_login(rdata);
}
}
static void qedf_l2_els_compl(struct qedf_els_cb_arg *cb_arg)
{
struct qedf_ioreq *els_req;
struct qedf_rport *fcport;
struct qedf_mp_req *mp_req;
struct fc_frame *fp;
struct fc_frame_header *fh, *mp_fc_hdr;
void *resp_buf, *fc_payload;
u32 resp_len;
u16 l2_oxid;
l2_oxid = cb_arg->l2_oxid;
els_req = cb_arg->io_req;
if (!els_req) {
QEDF_ERR(NULL, "els_req is NULL.\n");
goto free_arg;
}
/*
* If we are flushing the command just free the cb_arg as none of the
* response data will be valid.
*/
if (els_req->event == QEDF_IOREQ_EV_ELS_FLUSH)
goto free_arg;
fcport = els_req->fcport;
mp_req = &(els_req->mp_req);
mp_fc_hdr = &(mp_req->resp_fc_hdr);
resp_len = mp_req->resp_len;
resp_buf = mp_req->resp_buf;
/*
* If a middle path ELS command times out, don't try to return
* the command; instead do any internal cleanup and then let libfc
* time out the command and clean up its internal resources.
*/
if (els_req->event == QEDF_IOREQ_EV_ELS_TMO) {
/*
* If ADISC times out, libfc will timeout the exchange and then
* try to send a PLOGI which will timeout since the session is
* still offloaded. Force libfc to logout the session which
* will offload the connection and allow the PLOGI response to
* flow over the LL2 path.
*/
if (cb_arg->op == ELS_ADISC)
qedf_restart_rport(fcport);
return;
}
if (sizeof(struct fc_frame_header) + resp_len > QEDF_PAGE_SIZE) {
QEDF_ERR(&(fcport->qedf->dbg_ctx), "resp_len is "
"beyond page size.\n");
goto free_arg;
}
fp = fc_frame_alloc(fcport->qedf->lport, resp_len);
if (!fp) {
QEDF_ERR(&(fcport->qedf->dbg_ctx),
"fc_frame_alloc failure.\n");
return;
}
/* Copy frame header from firmware into fp */
fh = (struct fc_frame_header *)fc_frame_header_get(fp);
memcpy(fh, mp_fc_hdr, sizeof(struct fc_frame_header));
/* Copy payload from firmware into fp */
fc_payload = fc_frame_payload_get(fp, resp_len);
memcpy(fc_payload, resp_buf, resp_len);
QEDF_INFO(&(fcport->qedf->dbg_ctx), QEDF_LOG_ELS,
"Completing OX_ID 0x%x back to libfc.\n", l2_oxid);
qedf_process_l2_frame_compl(fcport, fp, l2_oxid);
free_arg:
kfree(cb_arg);
}
int qedf_send_adisc(struct qedf_rport *fcport, struct fc_frame *fp)
{
struct fc_els_adisc *adisc;
struct fc_frame_header *fh;
struct fc_lport *lport = fcport->qedf->lport;
struct qedf_els_cb_arg *cb_arg = NULL;
struct qedf_ctx *qedf;
uint32_t r_a_tov = lport->r_a_tov;
int rc;
qedf = fcport->qedf;
fh = fc_frame_header_get(fp);
cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
if (!cb_arg) {
QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate cb_arg for "
"ADISC\n");
rc = -ENOMEM;
goto adisc_err;
}
cb_arg->l2_oxid = ntohs(fh->fh_ox_id);
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
"Sending ADISC ox_id=0x%x.\n", cb_arg->l2_oxid);
adisc = fc_frame_payload_get(fp, sizeof(*adisc));
rc = qedf_initiate_els(fcport, ELS_ADISC, adisc, sizeof(*adisc),
qedf_l2_els_compl, cb_arg, r_a_tov);
adisc_err:
if (rc) {
QEDF_ERR(&(qedf->dbg_ctx), "ADISC failed.\n");
kfree(cb_arg);
}
return rc;
}
static void qedf_srr_compl(struct qedf_els_cb_arg *cb_arg)
{
struct qedf_ioreq *orig_io_req;
struct qedf_ioreq *srr_req;
struct qedf_mp_req *mp_req;
struct fc_frame_header *mp_fc_hdr, *fh;
struct fc_frame *fp;
void *resp_buf, *fc_payload;
u32 resp_len;
struct fc_lport *lport;
struct qedf_ctx *qedf;
int refcount;
u8 opcode;
srr_req = cb_arg->io_req;
qedf = srr_req->fcport->qedf;
lport = qedf->lport;
orig_io_req = cb_arg->aborted_io_req;
if (!orig_io_req)
goto out_free;
clear_bit(QEDF_CMD_SRR_SENT, &orig_io_req->flags);
if (srr_req->event != QEDF_IOREQ_EV_ELS_TMO &&
srr_req->event != QEDF_IOREQ_EV_ELS_ERR_DETECT)
cancel_delayed_work_sync(&orig_io_req->timeout_work);
refcount = atomic_read(&orig_io_req->refcount.refcount);
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered: orig_io=%p,"
" orig_io_xid=0x%x, rec_xid=0x%x, refcount=%d\n",
orig_io_req, orig_io_req->xid, srr_req->xid, refcount);
/* If a SRR times out, simply free resources */
if (srr_req->event == QEDF_IOREQ_EV_ELS_TMO)
goto out_free;
/* Normalize response data into struct fc_frame */
mp_req = &(srr_req->mp_req);
mp_fc_hdr = &(mp_req->resp_fc_hdr);
resp_len = mp_req->resp_len;
resp_buf = mp_req->resp_buf;
fp = fc_frame_alloc(lport, resp_len);
if (!fp) {
QEDF_ERR(&(qedf->dbg_ctx),
"fc_frame_alloc failure.\n");
goto out_free;
}
/* Copy frame header from firmware into fp */
fh = (struct fc_frame_header *)fc_frame_header_get(fp);
memcpy(fh, mp_fc_hdr, sizeof(struct fc_frame_header));
/* Copy payload from firmware into fp */
fc_payload = fc_frame_payload_get(fp, resp_len);
memcpy(fc_payload, resp_buf, resp_len);
opcode = fc_frame_payload_op(fp);
switch (opcode) {
case ELS_LS_ACC:
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
"SRR success.\n");
break;
case ELS_LS_RJT:
QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_ELS,
"SRR rejected.\n");
qedf_initiate_abts(orig_io_req, true);
break;
}
fc_frame_free(fp);
out_free:
/* Put reference for original command since SRR completed */
kref_put(&orig_io_req->refcount, qedf_release_cmd);
kfree(cb_arg);
}
static int qedf_send_srr(struct qedf_ioreq *orig_io_req, u32 offset, u8 r_ctl)
{
struct fcp_srr srr;
struct qedf_ctx *qedf;
struct qedf_rport *fcport;
struct fc_lport *lport;
struct qedf_els_cb_arg *cb_arg = NULL;
u32 sid, r_a_tov;
int rc;
if (!orig_io_req) {
QEDF_ERR(NULL, "orig_io_req is NULL.\n");
return -EINVAL;
}
fcport = orig_io_req->fcport;
/* Check that fcport is still offloaded */
if (!(test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags))) {
QEDF_ERR(NULL, "fcport is no longer offloaded.\n");
return -EINVAL;
}
if (!fcport->qedf) {
QEDF_ERR(NULL, "fcport->qedf is NULL.\n");
return -EINVAL;
}
/* Take reference until SRR command completion */
kref_get(&orig_io_req->refcount);
qedf = fcport->qedf;
lport = qedf->lport;
sid = fcport->sid;
r_a_tov = lport->r_a_tov;
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending SRR orig_io=%p, "
"orig_xid=0x%x\n", orig_io_req, orig_io_req->xid);
memset(&srr, 0, sizeof(srr));
cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
if (!cb_arg) {
QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate cb_arg for "
"SRR\n");
rc = -ENOMEM;
goto srr_err;
}
cb_arg->aborted_io_req = orig_io_req;
srr.srr_op = ELS_SRR;
srr.srr_ox_id = htons(orig_io_req->xid);
srr.srr_rx_id = htons(orig_io_req->rx_id);
srr.srr_rel_off = htonl(offset);
srr.srr_r_ctl = r_ctl;
rc = qedf_initiate_els(fcport, ELS_SRR, &srr, sizeof(srr),
qedf_srr_compl, cb_arg, r_a_tov);
srr_err:
if (rc) {
QEDF_ERR(&(qedf->dbg_ctx), "SRR failed - release orig_io_req"
"=0x%x\n", orig_io_req->xid);
kfree(cb_arg);
/* If we fail to queue SRR, send ABTS to orig_io */
qedf_initiate_abts(orig_io_req, true);
kref_put(&orig_io_req->refcount, qedf_release_cmd);
} else
/* Tell other threads that SRR is in progress */
set_bit(QEDF_CMD_SRR_SENT, &orig_io_req->flags);
return rc;
}
static void qedf_initiate_seq_cleanup(struct qedf_ioreq *orig_io_req,
u32 offset, u8 r_ctl)
{
struct qedf_rport *fcport;
unsigned long flags;
struct qedf_els_cb_arg *cb_arg;
fcport = orig_io_req->fcport;
QEDF_INFO(&(fcport->qedf->dbg_ctx), QEDF_LOG_ELS,
"Doing sequence cleanup for xid=0x%x offset=%u.\n",
orig_io_req->xid, offset);
cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
if (!cb_arg) {
QEDF_ERR(&(fcport->qedf->dbg_ctx), "Unable to allocate cb_arg "
"for sequence cleanup\n");
return;
}
/* Get reference for cleanup request */
kref_get(&orig_io_req->refcount);
orig_io_req->cmd_type = QEDF_SEQ_CLEANUP;
cb_arg->offset = offset;
cb_arg->r_ctl = r_ctl;
orig_io_req->cb_arg = cb_arg;
qedf_cmd_timer_set(fcport->qedf, orig_io_req,
QEDF_CLEANUP_TIMEOUT * HZ);
spin_lock_irqsave(&fcport->rport_lock, flags);
qedf_add_to_sq(fcport, orig_io_req->xid, 0,
FCOE_TASK_TYPE_SEQUENCE_CLEANUP, offset);
qedf_ring_doorbell(fcport);
spin_unlock_irqrestore(&fcport->rport_lock, flags);
}
void qedf_process_seq_cleanup_compl(struct qedf_ctx *qedf,
struct fcoe_cqe *cqe, struct qedf_ioreq *io_req)
{
int rc;
struct qedf_els_cb_arg *cb_arg;
cb_arg = io_req->cb_arg;
/* If we timed out just free resources */
if (io_req->event == QEDF_IOREQ_EV_ELS_TMO || !cqe)
goto free;
/* Kill the timer we put on the request */
cancel_delayed_work_sync(&io_req->timeout_work);
rc = qedf_send_srr(io_req, cb_arg->offset, cb_arg->r_ctl);
if (rc)
QEDF_ERR(&(qedf->dbg_ctx), "Unable to send SRR, I/O will "
"abort, xid=0x%x.\n", io_req->xid);
free:
kfree(cb_arg);
kref_put(&io_req->refcount, qedf_release_cmd);
}
static bool qedf_requeue_io_req(struct qedf_ioreq *orig_io_req)
{
struct qedf_rport *fcport;
struct qedf_ioreq *new_io_req;
unsigned long flags;
bool rc = false;
fcport = orig_io_req->fcport;
if (!fcport) {
QEDF_ERR(NULL, "fcport is NULL.\n");
goto out;
}
if (!orig_io_req->sc_cmd) {
QEDF_ERR(&(fcport->qedf->dbg_ctx), "sc_cmd is NULL for "
"xid=0x%x.\n", orig_io_req->xid);
goto out;
}
new_io_req = qedf_alloc_cmd(fcport, QEDF_SCSI_CMD);
if (!new_io_req) {
QEDF_ERR(&(fcport->qedf->dbg_ctx), "Could not allocate new "
"io_req.\n");
goto out;
}
new_io_req->sc_cmd = orig_io_req->sc_cmd;
/*
* This keeps the sc_cmd struct from being returned to the tape
* driver and being requeued twice. We do need to put a reference
* for the original I/O request since we will not do a SCSI completion
* for it.
*/
orig_io_req->sc_cmd = NULL;
kref_put(&orig_io_req->refcount, qedf_release_cmd);
spin_lock_irqsave(&fcport->rport_lock, flags);
/* kref for new command released in qedf_post_io_req on error */
if (qedf_post_io_req(fcport, new_io_req)) {
QEDF_ERR(&(fcport->qedf->dbg_ctx), "Unable to post io_req\n");
/* Return SQE to pool */
atomic_inc(&fcport->free_sqes);
} else {
QEDF_INFO(&(fcport->qedf->dbg_ctx), QEDF_LOG_ELS,
"Reissued SCSI command from orig_xid=0x%x on "
"new_xid=0x%x.\n", orig_io_req->xid, new_io_req->xid);
/*
* Abort the original I/O but do not return SCSI command as
* it has been reissued on another OX_ID.
*/
spin_unlock_irqrestore(&fcport->rport_lock, flags);
qedf_initiate_abts(orig_io_req, false);
goto out;
}
spin_unlock_irqrestore(&fcport->rport_lock, flags);
out:
return rc;
}
static void qedf_rec_compl(struct qedf_els_cb_arg *cb_arg)
{
struct qedf_ioreq *orig_io_req;
struct qedf_ioreq *rec_req;
struct qedf_mp_req *mp_req;
struct fc_frame_header *mp_fc_hdr, *fh;
struct fc_frame *fp;
void *resp_buf, *fc_payload;
u32 resp_len;
struct fc_lport *lport;
struct qedf_ctx *qedf;
int refcount;
enum fc_rctl r_ctl;
struct fc_els_ls_rjt *rjt;
struct fc_els_rec_acc *acc;
u8 opcode;
u32 offset, e_stat;
struct scsi_cmnd *sc_cmd;
bool srr_needed = false;
rec_req = cb_arg->io_req;
qedf = rec_req->fcport->qedf;
lport = qedf->lport;
orig_io_req = cb_arg->aborted_io_req;
if (!orig_io_req)
goto out_free;
if (rec_req->event != QEDF_IOREQ_EV_ELS_TMO &&
rec_req->event != QEDF_IOREQ_EV_ELS_ERR_DETECT)
cancel_delayed_work_sync(&orig_io_req->timeout_work);
refcount = atomic_read(&orig_io_req->refcount.refcount);
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered: orig_io=%p,"
" orig_io_xid=0x%x, rec_xid=0x%x, refcount=%d\n",
orig_io_req, orig_io_req->xid, rec_req->xid, refcount);
/* If a REC times out, free resources */
if (rec_req->event == QEDF_IOREQ_EV_ELS_TMO)
goto out_free;
/* Normalize response data into struct fc_frame */
mp_req = &(rec_req->mp_req);
mp_fc_hdr = &(mp_req->resp_fc_hdr);
resp_len = mp_req->resp_len;
acc = resp_buf = mp_req->resp_buf;
fp = fc_frame_alloc(lport, resp_len);
if (!fp) {
QEDF_ERR(&(qedf->dbg_ctx),
"fc_frame_alloc failure.\n");
goto out_free;
}
/* Copy frame header from firmware into fp */
fh = (struct fc_frame_header *)fc_frame_header_get(fp);
memcpy(fh, mp_fc_hdr, sizeof(struct fc_frame_header));
/* Copy payload from firmware into fp */
fc_payload = fc_frame_payload_get(fp, resp_len);
memcpy(fc_payload, resp_buf, resp_len);
opcode = fc_frame_payload_op(fp);
if (opcode == ELS_LS_RJT) {
rjt = fc_frame_payload_get(fp, sizeof(*rjt));
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
"Received LS_RJT for REC: er_reason=0x%x, "
"er_explan=0x%x.\n", rjt->er_reason, rjt->er_explan);
/*
* The following response(s) mean that we need to reissue the
* request on another exchange. We need to do this without
* informing the upper layers lest it cause an application
* error.
*/
if ((rjt->er_reason == ELS_RJT_LOGIC ||
rjt->er_reason == ELS_RJT_UNAB) &&
rjt->er_explan == ELS_EXPL_OXID_RXID) {
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
"Handle CMD LOST case.\n");
qedf_requeue_io_req(orig_io_req);
}
} else if (opcode == ELS_LS_ACC) {
offset = ntohl(acc->reca_fc4value);
e_stat = ntohl(acc->reca_e_stat);
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
"Received LS_ACC for REC: offset=0x%x, e_stat=0x%x.\n",
offset, e_stat);
if (e_stat & ESB_ST_SEQ_INIT) {
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
"Target has the seq init\n");
goto out_free_frame;
}
sc_cmd = orig_io_req->sc_cmd;
if (!sc_cmd) {
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
"sc_cmd is NULL for xid=0x%x.\n",
orig_io_req->xid);
goto out_free_frame;
}
/* SCSI write case */
if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
if (offset == orig_io_req->data_xfer_len) {
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
"WRITE - response lost.\n");
r_ctl = FC_RCTL_DD_CMD_STATUS;
srr_needed = true;
offset = 0;
} else {
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
"WRITE - XFER_RDY/DATA lost.\n");
r_ctl = FC_RCTL_DD_DATA_DESC;
/* Use data from warning CQE instead of REC */
offset = orig_io_req->tx_buf_off;
}
/* SCSI read case */
} else {
if (orig_io_req->rx_buf_off ==
orig_io_req->data_xfer_len) {
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
"READ - response lost.\n");
srr_needed = true;
r_ctl = FC_RCTL_DD_CMD_STATUS;
offset = 0;
} else {
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
"READ - DATA lost.\n");
/*
* For read case we always set the offset to 0
* for sequence recovery task.
*/
offset = 0;
r_ctl = FC_RCTL_DD_SOL_DATA;
}
}
if (srr_needed)
qedf_send_srr(orig_io_req, offset, r_ctl);
else
qedf_initiate_seq_cleanup(orig_io_req, offset, r_ctl);
}
out_free_frame:
fc_frame_free(fp);
out_free:
/* Put reference for original command since REC completed */
kref_put(&orig_io_req->refcount, qedf_release_cmd);
kfree(cb_arg);
}
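/*
* Summary of the LS_ACC recovery decisions above (editorial reference
* only):
*
* - WRITE, offset == data_xfer_len: FCP_RSP lost -> SRR with offset 0,
*   r_ctl = FC_RCTL_DD_CMD_STATUS.
* - WRITE, otherwise: XFER_RDY/DATA lost -> sequence cleanup at
*   tx_buf_off, r_ctl = FC_RCTL_DD_DATA_DESC.
* - READ, rx_buf_off == data_xfer_len: FCP_RSP lost -> SRR with offset 0,
*   r_ctl = FC_RCTL_DD_CMD_STATUS.
* - READ, otherwise: DATA lost -> sequence cleanup at offset 0,
*   r_ctl = FC_RCTL_DD_SOL_DATA.
*
* The sequence cleanup path sends the SRR itself once the cleanup
* completes (see qedf_process_seq_cleanup_compl()).
*/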
/* Assumes kref is already held by caller */
int qedf_send_rec(struct qedf_ioreq *orig_io_req)
{
struct fc_els_rec rec;
struct qedf_rport *fcport;
struct fc_lport *lport;
struct qedf_els_cb_arg *cb_arg = NULL;
struct qedf_ctx *qedf;
uint32_t sid;
uint32_t r_a_tov;
int rc;
if (!orig_io_req) {
QEDF_ERR(NULL, "orig_io_req is NULL.\n");
return -EINVAL;
}
fcport = orig_io_req->fcport;
/* Check that fcport is still offloaded */
if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
QEDF_ERR(NULL, "fcport is no longer offloaded.\n");
return -EINVAL;
}
if (!fcport->qedf) {
QEDF_ERR(NULL, "fcport->qedf is NULL.\n");
return -EINVAL;
}
/* Take reference until REC command completion */
kref_get(&orig_io_req->refcount);
qedf = fcport->qedf;
lport = qedf->lport;
sid = fcport->sid;
r_a_tov = lport->r_a_tov;
memset(&rec, 0, sizeof(rec));
cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
if (!cb_arg) {
QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate cb_arg for "
"REC\n");
rc = -ENOMEM;
goto rec_err;
}
cb_arg->aborted_io_req = orig_io_req;
rec.rec_cmd = ELS_REC;
hton24(rec.rec_s_id, sid);
rec.rec_ox_id = htons(orig_io_req->xid);
rec.rec_rx_id =
htons(orig_io_req->task->tstorm_st_context.read_write.rx_id);
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending REC orig_io=%p, "
"orig_xid=0x%x rx_id=0x%x\n", orig_io_req,
orig_io_req->xid, rec.rec_rx_id);
rc = qedf_initiate_els(fcport, ELS_REC, &rec, sizeof(rec),
qedf_rec_compl, cb_arg, r_a_tov);
rec_err:
if (rc) {
QEDF_ERR(&(qedf->dbg_ctx), "REC failed - release orig_io_req"
"=0x%x\n", orig_io_req->xid);
kfree(cb_arg);
kref_put(&orig_io_req->refcount, qedf_release_cmd);
}
return rc;
}

View File

@ -0,0 +1,269 @@
/*
* QLogic FCoE Offload Driver
* Copyright (c) 2016 Cavium Inc.
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
* this source tree.
*/
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include "qedf.h"
extern const struct qed_fcoe_ops *qed_ops;
/*
* FIP VLAN functions that will eventually move to libfcoe.
*/
void qedf_fcoe_send_vlan_req(struct qedf_ctx *qedf)
{
struct sk_buff *skb;
char *eth_fr;
int fr_len;
struct fip_vlan *vlan;
#define MY_FIP_ALL_FCF_MACS ((__u8[6]) { 1, 0x10, 0x18, 1, 0, 2 })
static u8 my_fcoe_all_fcfs[ETH_ALEN] = MY_FIP_ALL_FCF_MACS;
skb = dev_alloc_skb(sizeof(struct fip_vlan));
if (!skb)
return;
fr_len = sizeof(*vlan);
eth_fr = (char *)skb->data;
vlan = (struct fip_vlan *)eth_fr;
memset(vlan, 0, sizeof(*vlan));
ether_addr_copy(vlan->eth.h_source, qedf->mac);
ether_addr_copy(vlan->eth.h_dest, my_fcoe_all_fcfs);
vlan->eth.h_proto = htons(ETH_P_FIP);
vlan->fip.fip_ver = FIP_VER_ENCAPS(FIP_VER);
vlan->fip.fip_op = htons(FIP_OP_VLAN);
vlan->fip.fip_subcode = FIP_SC_VL_REQ;
vlan->fip.fip_dl_len = htons(sizeof(vlan->desc) / FIP_BPW);
vlan->desc.mac.fd_desc.fip_dtype = FIP_DT_MAC;
vlan->desc.mac.fd_desc.fip_dlen = sizeof(vlan->desc.mac) / FIP_BPW;
ether_addr_copy(vlan->desc.mac.fd_mac, qedf->mac);
vlan->desc.wwnn.fd_desc.fip_dtype = FIP_DT_NAME;
vlan->desc.wwnn.fd_desc.fip_dlen = sizeof(vlan->desc.wwnn) / FIP_BPW;
put_unaligned_be64(qedf->lport->wwnn, &vlan->desc.wwnn.fd_wwn);
skb_put(skb, sizeof(*vlan));
skb->protocol = htons(ETH_P_FIP);
skb_reset_mac_header(skb);
skb_reset_network_header(skb);
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Sending FIP VLAN "
"request.");
if (atomic_read(&qedf->link_state) != QEDF_LINK_UP) {
QEDF_WARN(&(qedf->dbg_ctx), "Cannot send vlan request "
"because link is not up.\n");
kfree_skb(skb);
return;
}
qed_ops->ll2->start_xmit(qedf->cdev, skb);
}
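/*
* Editorial note: the request above is addressed to the ALL-FCF-MACs group
* address (01:10:18:01:00:02) and carries MAC and WWNN descriptors
* identifying this ENode. FCFs answer with a FIP VLAN Notification
* (FIP_OP_VLAN/FIP_SC_VL_NOTE) carrying VLAN descriptors, which
* qedf_fcoe_process_vlan_resp() below parses.
*/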
static void qedf_fcoe_process_vlan_resp(struct qedf_ctx *qedf,
struct sk_buff *skb)
{
struct fip_header *fiph;
struct fip_desc *desc;
u16 vid = 0;
ssize_t rlen;
size_t dlen;
fiph = (struct fip_header *)(((void *)skb->data) + 2 * ETH_ALEN + 2);
rlen = ntohs(fiph->fip_dl_len) * FIP_BPW;
desc = (struct fip_desc *)(fiph + 1);
while (rlen > 0) {
dlen = desc->fip_dlen * FIP_BPW;
switch (desc->fip_dtype) {
case FIP_DT_VLAN:
vid = ntohs(((struct fip_vlan_desc *)desc)->fd_vlan);
break;
}
desc = (struct fip_desc *)((char *)desc + dlen);
rlen -= dlen;
}
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "VLAN response, "
"vid=0x%x.\n", vid);
if (vid > 0 && qedf->vlan_id != vid) {
qedf_set_vlan_id(qedf, vid);
/* Inform waiter that it's ok to call fcoe_ctlr_link_up() */
complete(&qedf->fipvlan_compl);
}
}
void qedf_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
{
struct qedf_ctx *qedf = container_of(fip, struct qedf_ctx, ctlr);
struct ethhdr *eth_hdr;
struct vlan_ethhdr *vlan_hdr;
struct fip_header *fiph;
u16 op, vlan_tci = 0;
u8 sub;
if (!test_bit(QEDF_LL2_STARTED, &qedf->flags)) {
QEDF_WARN(&(qedf->dbg_ctx), "LL2 not started\n");
kfree_skb(skb);
return;
}
fiph = (struct fip_header *) ((void *)skb->data + 2 * ETH_ALEN + 2);
eth_hdr = (struct ethhdr *)skb_mac_header(skb);
op = ntohs(fiph->fip_op);
sub = fiph->fip_subcode;
if (!qedf->vlan_hw_insert) {
vlan_hdr = (struct vlan_ethhdr *)skb_push(skb, sizeof(*vlan_hdr)
- sizeof(*eth_hdr));
memcpy(vlan_hdr, eth_hdr, 2 * ETH_ALEN);
vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
vlan_hdr->h_vlan_encapsulated_proto = eth_hdr->h_proto;
vlan_hdr->h_vlan_TCI = vlan_tci = htons(qedf->vlan_id);
}
/* Update eth_hdr since we added a VLAN tag */
eth_hdr = (struct ethhdr *)skb_mac_header(skb);
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2, "FIP frame send: "
"dest=%pM op=%x sub=%x vlan=%04x.", eth_hdr->h_dest, op, sub,
ntohs(vlan_tci));
if (qedf_dump_frames)
print_hex_dump(KERN_WARNING, "fip ", DUMP_PREFIX_OFFSET, 16, 1,
skb->data, skb->len, false);
qed_ops->ll2->start_xmit(qedf->cdev, skb);
}
/* Process incoming FIP frames. */
void qedf_fip_recv(struct qedf_ctx *qedf, struct sk_buff *skb)
{
struct ethhdr *eth_hdr;
struct fip_header *fiph;
struct fip_desc *desc;
struct fip_mac_desc *mp;
struct fip_wwn_desc *wp;
struct fip_vn_desc *vp;
size_t rlen, dlen;
uint32_t cvl_port_id;
__u8 cvl_mac[ETH_ALEN];
u16 op;
u8 sub;
eth_hdr = (struct ethhdr *)skb_mac_header(skb);
fiph = (struct fip_header *) ((void *)skb->data + 2 * ETH_ALEN + 2);
op = ntohs(fiph->fip_op);
sub = fiph->fip_subcode;
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2, "FIP frame received: "
"skb=%p fiph=%p source=%pM op=%x sub=%x", skb, fiph,
eth_hdr->h_source, op, sub);
if (qedf_dump_frames)
print_hex_dump(KERN_WARNING, "fip ", DUMP_PREFIX_OFFSET, 16, 1,
skb->data, skb->len, false);
/* Handle FIP VLAN resp in the driver */
if (op == FIP_OP_VLAN && sub == FIP_SC_VL_NOTE) {
qedf_fcoe_process_vlan_resp(qedf, skb);
qedf->vlan_hw_insert = 0;
kfree_skb(skb);
} else if (op == FIP_OP_CTRL && sub == FIP_SC_CLR_VLINK) {
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Clear virtual "
"link received.\n");
/* Check that an FCF has been selected by fcoe */
if (qedf->ctlr.sel_fcf == NULL) {
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
"Dropping CVL since FCF has not been selected "
"yet.\n");
kfree_skb(skb);
return;
}
cvl_port_id = 0;
memset(cvl_mac, 0, ETH_ALEN);
/*
* We need to loop through the CVL descriptors to determine
* if we want to reset the fcoe link
*/
rlen = ntohs(fiph->fip_dl_len) * FIP_BPW;
desc = (struct fip_desc *)(fiph + 1);
while (rlen >= sizeof(*desc)) {
dlen = desc->fip_dlen * FIP_BPW;
switch (desc->fip_dtype) {
case FIP_DT_MAC:
mp = (struct fip_mac_desc *)desc;
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
"fd_mac=%pM.\n", __func__, mp->fd_mac);
ether_addr_copy(cvl_mac, mp->fd_mac);
break;
case FIP_DT_NAME:
wp = (struct fip_wwn_desc *)desc;
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
"fc_wwpn=%016llx.\n",
get_unaligned_be64(&wp->fd_wwn));
break;
case FIP_DT_VN_ID:
vp = (struct fip_vn_desc *)desc;
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
"fd_fc_id=%x.\n", ntoh24(vp->fd_fc_id));
cvl_port_id = ntoh24(vp->fd_fc_id);
break;
default:
/* Ignore anything else */
break;
}
desc = (struct fip_desc *)((char *)desc + dlen);
rlen -= dlen;
}
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
"cvl_port_id=%06x cvl_mac=%pM.\n", cvl_port_id,
cvl_mac);
if (cvl_port_id == qedf->lport->port_id &&
ether_addr_equal(cvl_mac,
qedf->ctlr.sel_fcf->fcf_mac)) {
fcoe_ctlr_link_down(&qedf->ctlr);
qedf_wait_for_upload(qedf);
fcoe_ctlr_link_up(&qedf->ctlr);
}
kfree_skb(skb);
} else {
/* Everything else is handled by libfcoe */
__skb_pull(skb, ETH_HLEN);
fcoe_ctlr_recv(&qedf->ctlr, skb);
}
}
void qedf_update_src_mac(struct fc_lport *lport, u8 *addr)
{
struct qedf_ctx *qedf = lport_priv(lport);
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
"Setting data_src_addr=%pM.\n", addr);
ether_addr_copy(qedf->data_src_addr, addr);
}
u8 *qedf_get_src_mac(struct fc_lport *lport)
{
u8 mac[ETH_ALEN];
u8 port_id[3];
struct qedf_ctx *qedf = lport_priv(lport);
/* We need to use the lport port_id to create the data_src_addr */
if (is_zero_ether_addr(qedf->data_src_addr)) {
hton24(port_id, lport->port_id);
fc_fcoe_set_mac(mac, port_id);
qedf->ctlr.update_mac(lport, mac);
}
return qedf->data_src_addr;
}
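/*
* Editorial note: fc_fcoe_set_mac() derives a fabric-provided MAC address
* (FPMA) by prepending the default FC-MAP (0e:fc:00) to the 3-byte FC
* port_id, so e.g. port_id 0x010203 yields data_src_addr
* 0e:fc:00:01:02:03.
*/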

View File

@ -0,0 +1,422 @@
/*
* QLogic FCoE Offload Driver
* Copyright (c) 2016 Cavium Inc.
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
* this source tree.
*/
#ifndef __QEDF_HSI__
#define __QEDF_HSI__
/*
* Add include to common target
*/
#include <linux/qed/common_hsi.h>
/*
* Add include to common storage target
*/
#include <linux/qed/storage_common.h>
/*
* Add include to common fcoe target for both eCore and protocol driver
*/
#include <linux/qed/fcoe_common.h>
/*
* FCoE CQ element ABTS information
*/
struct fcoe_abts_info {
u8 r_ctl /* R_CTL in the ABTS response frame */;
u8 reserved0;
__le16 rx_id;
__le32 reserved2[2];
__le32 fc_payload[3] /* ABTS FC payload response frame */;
};
/*
* FCoE class type
*/
enum fcoe_class_type {
FCOE_TASK_CLASS_TYPE_3,
FCOE_TASK_CLASS_TYPE_2,
MAX_FCOE_CLASS_TYPE
};
/*
* FCoE CMDQ element control information
*/
struct fcoe_cmdqe_control {
__le16 conn_id;
u8 num_additional_cmdqes;
u8 cmdType;
/* true for ABTS request cmdqe. used in Target mode */
#define FCOE_CMDQE_CONTROL_ABTSREQCMD_MASK 0x1
#define FCOE_CMDQE_CONTROL_ABTSREQCMD_SHIFT 0
#define FCOE_CMDQE_CONTROL_RESERVED1_MASK 0x7F
#define FCOE_CMDQE_CONTROL_RESERVED1_SHIFT 1
u8 reserved2[4];
};
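/*
* Minimal illustrative accessor, not part of the firmware interface,
* assuming the ABTSREQCMD flag bit lives in the cmdType field that the
* defines above annotate (per the usual qed HSI mask/shift convention):
*/
static inline u8 fcoe_cmdqe_is_abts_req(const struct fcoe_cmdqe_control *hdr)
{
/* Non-zero when this CMDQE carries a target-mode ABTS request */
return (hdr->cmdType >> FCOE_CMDQE_CONTROL_ABTSREQCMD_SHIFT) &
FCOE_CMDQE_CONTROL_ABTSREQCMD_MASK;
}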
/*
* FCoE control + payload CMDQ element
*/
struct fcoe_cmdqe {
struct fcoe_cmdqe_control hdr;
u8 fc_header[24];
__le32 fcp_cmd_payload[8];
};
/*
* FCP RSP flags
*/
struct fcoe_fcp_rsp_flags {
u8 flags;
#define FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID_MASK 0x1
#define FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID_SHIFT 0
#define FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID_MASK 0x1
#define FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID_SHIFT 1
#define FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER_MASK 0x1
#define FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER_SHIFT 2
#define FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER_MASK 0x1
#define FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER_SHIFT 3
#define FCOE_FCP_RSP_FLAGS_FCP_CONF_REQ_MASK 0x1
#define FCOE_FCP_RSP_FLAGS_FCP_CONF_REQ_SHIFT 4
#define FCOE_FCP_RSP_FLAGS_FCP_BIDI_FLAGS_MASK 0x7
#define FCOE_FCP_RSP_FLAGS_FCP_BIDI_FLAGS_SHIFT 5
};
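/*
* Illustrative helper, not part of the firmware interface: flag bits are
* read mask-and-shift style, e.g. to test for a residual underrun:
*/
static inline u8 fcoe_fcp_rsp_resid_under(const struct fcoe_fcp_rsp_flags *f)
{
return (f->flags >> FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER_SHIFT) &
FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER_MASK;
}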
/*
* FCoE CQ element response information
*/
struct fcoe_cqe_rsp_info {
struct fcoe_fcp_rsp_flags rsp_flags;
u8 scsi_status_code;
__le16 retry_delay_timer;
__le32 fcp_resid;
__le32 fcp_sns_len;
__le32 fcp_rsp_len;
__le16 rx_id;
u8 fw_error_flags;
#define FCOE_CQE_RSP_INFO_FW_UNDERRUN_MASK 0x1 /* FW detected underrun */
#define FCOE_CQE_RSP_INFO_FW_UNDERRUN_SHIFT 0
#define FCOE_CQE_RSP_INFO_RESREVED_MASK 0x7F
#define FCOE_CQE_RSP_INFO_RESREVED_SHIFT 1
u8 reserved;
__le32 fw_residual /* Residual bytes calculated by FW */;
};
/*
* FCoE CQ element Target completion information
*/
struct fcoe_cqe_target_info {
__le16 rx_id;
__le16 reserved0;
__le32 reserved1[5];
};
/*
* FCoE error/warning reporting entry
*/
struct fcoe_err_report_entry {
__le32 err_warn_bitmap_lo /* Error bitmap lower 32 bits */;
__le32 err_warn_bitmap_hi /* Error bitmap higher 32 bits */;
/* Buffer offset from the beginning of the Sequence last transmitted */
__le32 tx_buf_off;
/* Buffer offset from the beginning of the Sequence last received */
__le32 rx_buf_off;
__le16 rx_id /* RX_ID of the associated task */;
__le16 reserved1;
__le32 reserved2;
};
/*
* FCoE CQ element middle path information
*/
struct fcoe_cqe_midpath_info {
__le32 data_placement_size;
__le16 rx_id;
__le16 reserved0;
__le32 reserved1[4];
};
/*
* FCoE CQ element unsolicited information
*/
struct fcoe_unsolic_info {
/* BD information: Physical address and opaque data */
struct scsi_bd bd_info;
__le16 conn_id /* Connection ID the frame is associated to */;
__le16 pkt_len /* Packet length */;
u8 reserved1[4];
};
/*
* FCoE warning reporting entry
*/
struct fcoe_warning_report_entry {
/* BD information: Physical address and opaque data */
struct scsi_bd bd_info;
/* Buffer offset from the beginning of the Sequence last transmitted */
__le32 buf_off;
__le16 rx_id /* RX_ID of the associated task */;
__le16 reserved1;
};
/*
* FCoE CQ element information
*/
union fcoe_cqe_info {
struct fcoe_cqe_rsp_info rsp_info /* Response completion information */;
/* Target completion information */
struct fcoe_cqe_target_info target_info;
/* Error completion information */
struct fcoe_err_report_entry err_info;
struct fcoe_abts_info abts_info /* ABTS completion information */;
/* Middle path completion information */
struct fcoe_cqe_midpath_info midpath_info;
/* Unsolicited packet completion information */
struct fcoe_unsolic_info unsolic_info;
/* Warning completion information (Rec Tov expiration) */
struct fcoe_warning_report_entry warn_info;
};
/*
* FCoE CQ element
*/
struct fcoe_cqe {
__le32 cqe_data;
/* The task identifier (OX_ID) to be completed */
#define FCOE_CQE_TASK_ID_MASK 0xFFFF
#define FCOE_CQE_TASK_ID_SHIFT 0
/*
* The CQE type: 0x0 - indicates a pending work request completion;
* 0x1 - indicates an unsolicited event notification (use enum
* fcoe_cqe_type).
*/
#define FCOE_CQE_CQE_TYPE_MASK 0xF
#define FCOE_CQE_CQE_TYPE_SHIFT 16
#define FCOE_CQE_RESERVED0_MASK 0xFFF
#define FCOE_CQE_RESERVED0_SHIFT 20
__le16 reserved1;
__le16 fw_cq_prod;
union fcoe_cqe_info cqe_info;
};
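/*
* Minimal illustrative accessors, not part of the firmware interface:
* cqe_data packs the task id (OX_ID) and the CQE type per the defines
* above.
*/
static inline u16 fcoe_cqe_get_task_id(const struct fcoe_cqe *cqe)
{
return (le32_to_cpu(cqe->cqe_data) >> FCOE_CQE_TASK_ID_SHIFT) &
FCOE_CQE_TASK_ID_MASK;
}
static inline u8 fcoe_cqe_get_type(const struct fcoe_cqe *cqe)
{
return (le32_to_cpu(cqe->cqe_data) >> FCOE_CQE_CQE_TYPE_SHIFT) &
FCOE_CQE_CQE_TYPE_MASK;
}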
/*
* FCoE CQE type
*/
enum fcoe_cqe_type {
/* solicited response on a R/W or middle-path SQE */
FCOE_GOOD_COMPLETION_CQE_TYPE,
FCOE_UNSOLIC_CQE_TYPE /* unsolicited packet, RQ consumed */,
FCOE_ERROR_DETECTION_CQE_TYPE /* timer expiration, validation error */,
FCOE_WARNING_CQE_TYPE /* rec_tov or rr_tov timer expiration */,
FCOE_EXCH_CLEANUP_CQE_TYPE /* task cleanup completed */,
FCOE_ABTS_CQE_TYPE /* ABTS received and task cleaned */,
FCOE_DUMMY_CQE_TYPE /* just increment SQ CONS */,
/* Task was completed right after sending a pkt to the target */
FCOE_LOCAL_COMP_CQE_TYPE,
MAX_FCOE_CQE_TYPE
};
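/*
* A CQ consumer would typically dispatch on the extracted type; a sketch
* only, using the illustrative fcoe_cqe_get_type() above:
*
*	switch (fcoe_cqe_get_type(cqe)) {
*	case FCOE_GOOD_COMPLETION_CQE_TYPE:
*		complete the pending I/O or middle-path request;
*		break;
*	case FCOE_UNSOLIC_CQE_TYPE:
*		process the RQ buffer referenced by unsolic_info;
*		break;
*	default:
*		handle the error/warning/cleanup notification;
*	}
*/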
/*
* FCoE device type
*/
enum fcoe_device_type {
FCOE_TASK_DEV_TYPE_DISK,
FCOE_TASK_DEV_TYPE_TAPE,
MAX_FCOE_DEVICE_TYPE
};
/*
* FCoE fast path error codes
*/
enum fcoe_fp_error_warning_code {
FCOE_ERROR_CODE_XFER_OOO_RO /* XFER error codes */,
FCOE_ERROR_CODE_XFER_RO_NOT_ALIGNED,
FCOE_ERROR_CODE_XFER_NULL_BURST_LEN,
FCOE_ERROR_CODE_XFER_RO_GREATER_THAN_DATA2TRNS,
FCOE_ERROR_CODE_XFER_INVALID_PAYLOAD_SIZE,
FCOE_ERROR_CODE_XFER_TASK_TYPE_NOT_WRITE,
FCOE_ERROR_CODE_XFER_PEND_XFER_SET,
FCOE_ERROR_CODE_XFER_OPENED_SEQ,
FCOE_ERROR_CODE_XFER_FCTL,
FCOE_ERROR_CODE_FCP_RSP_BIDI_FLAGS_SET /* FCP RSP error codes */,
FCOE_ERROR_CODE_FCP_RSP_INVALID_LENGTH_FIELD,
FCOE_ERROR_CODE_FCP_RSP_INVALID_SNS_FIELD,
FCOE_ERROR_CODE_FCP_RSP_INVALID_PAYLOAD_SIZE,
FCOE_ERROR_CODE_FCP_RSP_PEND_XFER_SET,
FCOE_ERROR_CODE_FCP_RSP_OPENED_SEQ,
FCOE_ERROR_CODE_FCP_RSP_FCTL,
FCOE_ERROR_CODE_FCP_RSP_LAST_SEQ_RESET,
FCOE_ERROR_CODE_FCP_RSP_CONF_REQ_NOT_SUPPORTED_YET,
FCOE_ERROR_CODE_DATA_OOO_RO /* FCP DATA error codes */,
FCOE_ERROR_CODE_DATA_EXCEEDS_DEFINED_MAX_FRAME_SIZE,
FCOE_ERROR_CODE_DATA_EXCEEDS_DATA2TRNS,
FCOE_ERROR_CODE_DATA_SOFI3_SEQ_ACTIVE_SET,
FCOE_ERROR_CODE_DATA_SOFN_SEQ_ACTIVE_RESET,
FCOE_ERROR_CODE_DATA_EOFN_END_SEQ_SET,
FCOE_ERROR_CODE_DATA_EOFT_END_SEQ_RESET,
FCOE_ERROR_CODE_DATA_TASK_TYPE_NOT_READ,
FCOE_ERROR_CODE_DATA_FCTL_INITIATIR,
FCOE_ERROR_CODE_MIDPATH_INVALID_TYPE /* Middle path error codes */,
FCOE_ERROR_CODE_MIDPATH_SOFI3_SEQ_ACTIVE_SET,
FCOE_ERROR_CODE_MIDPATH_SOFN_SEQ_ACTIVE_RESET,
FCOE_ERROR_CODE_MIDPATH_EOFN_END_SEQ_SET,
FCOE_ERROR_CODE_MIDPATH_EOFT_END_SEQ_RESET,
FCOE_ERROR_CODE_MIDPATH_REPLY_FCTL,
FCOE_ERROR_CODE_MIDPATH_INVALID_REPLY,
FCOE_ERROR_CODE_MIDPATH_ELS_REPLY_RCTL,
FCOE_ERROR_CODE_COMMON_MIDDLE_FRAME_WITH_PAD /* Common error codes */,
FCOE_ERROR_CODE_COMMON_SEQ_INIT_IN_TCE,
FCOE_ERROR_CODE_COMMON_FC_HDR_RX_ID_MISMATCH,
FCOE_ERROR_CODE_COMMON_INCORRECT_SEQ_CNT,
FCOE_ERROR_CODE_COMMON_DATA_FC_HDR_FCP_TYPE_MISMATCH,
FCOE_ERROR_CODE_COMMON_DATA_NO_MORE_SGES,
FCOE_ERROR_CODE_COMMON_OPTIONAL_FC_HDR,
FCOE_ERROR_CODE_COMMON_READ_TCE_OX_ID_TOO_BIG,
FCOE_ERROR_CODE_COMMON_DATA_WAS_NOT_TRANSMITTED,
FCOE_ERROR_CODE_COMMON_TASK_DDF_RCTL_INFO_FIELD,
FCOE_ERROR_CODE_COMMON_TASK_INVALID_RCTL,
FCOE_ERROR_CODE_COMMON_TASK_RCTL_GENERAL_MISMATCH,
FCOE_ERROR_CODE_E_D_TOV_TIMER_EXPIRATION /* Timer error codes */,
FCOE_WARNING_CODE_REC_TOV_TIMER_EXPIRATION /* Timer error codes */,
FCOE_ERROR_CODE_RR_TOV_TIMER_EXPIRATION /* Timer error codes */,
/* ABTS response packet arrived unexpectedly */
FCOE_ERROR_CODE_ABTS_REPLY_UNEXPECTED,
FCOE_ERROR_CODE_TARGET_MODE_FCP_RSP,
FCOE_ERROR_CODE_TARGET_MODE_FCP_XFER,
FCOE_ERROR_CODE_TARGET_MODE_DATA_TASK_TYPE_NOT_WRITE,
FCOE_ERROR_CODE_DATA_FCTL_TARGET,
FCOE_ERROR_CODE_TARGET_DATA_SIZE_NO_MATCH_XFER,
FCOE_ERROR_CODE_TARGET_DIF_CRC_CHECKSUM_ERROR,
FCOE_ERROR_CODE_TARGET_DIF_REF_TAG_ERROR,
FCOE_ERROR_CODE_TARGET_DIF_APP_TAG_ERROR,
MAX_FCOE_FP_ERROR_WARNING_CODE
};
/*
* FCoE RESPQ element
*/
struct fcoe_respqe {
__le16 ox_id /* OX_ID that is located in the FCP_RSP FC header */;
__le16 rx_id /* RX_ID that is located in the FCP_RSP FC header */;
__le32 additional_info;
/* PARAM that is located in the FCP_RSP FC header */
#define FCOE_RESPQE_PARAM_MASK 0xFFFFFF
#define FCOE_RESPQE_PARAM_SHIFT 0
/* Indication whether it is Target-auto-rsp mode or not */
#define FCOE_RESPQE_TARGET_AUTO_RSP_MASK 0xFF
#define FCOE_RESPQE_TARGET_AUTO_RSP_SHIFT 24
};
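/*
* Illustrative accessor, not part of the firmware interface: pulls the
* FCP_RSP PARAM field out of additional_info per the defines above.
*/
static inline u32 fcoe_respqe_get_param(const struct fcoe_respqe *rqe)
{
return (le32_to_cpu(rqe->additional_info) >> FCOE_RESPQE_PARAM_SHIFT) &
FCOE_RESPQE_PARAM_MASK;
}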
/*
* FCoE slow path error codes
*/
enum fcoe_sp_error_code {
/* Error codes for Error Reporting in slow path flows */
FCOE_ERROR_CODE_SLOW_PATH_TOO_MANY_FUNCS,
FCOE_ERROR_SLOW_PATH_CODE_NO_LICENSE,
MAX_FCOE_SP_ERROR_CODE
};
/*
* FCoE SQE request type
*/
enum fcoe_sqe_request_type {
SEND_FCOE_CMD,
SEND_FCOE_MIDPATH,
SEND_FCOE_ABTS_REQUEST,
FCOE_EXCHANGE_CLEANUP,
FCOE_SEQUENCE_RECOVERY,
SEND_FCOE_XFER_RDY,
SEND_FCOE_RSP,
SEND_FCOE_RSP_WITH_SENSE_DATA,
SEND_FCOE_TARGET_DATA,
SEND_FCOE_INITIATOR_DATA,
/*
* Xfer Continuation (==1) ready to be sent. Previous XFERs data
* received successfully.
*/
SEND_FCOE_XFER_CONTINUATION_RDY,
SEND_FCOE_TARGET_ABTS_RSP,
MAX_FCOE_SQE_REQUEST_TYPE
};
/*
* FCoE task TX state
*/
enum fcoe_task_tx_state {
/* Initiate state after driver has initialized the task */
FCOE_TASK_TX_STATE_NORMAL,
/* Updated by TX path after complete transmitting unsolicited packet */
FCOE_TASK_TX_STATE_UNSOLICITED_COMPLETED,
/*
* Updated by TX path after start processing the task requesting the
* cleanup/abort operation
*/
FCOE_TASK_TX_STATE_CLEAN_REQ,
FCOE_TASK_TX_STATE_ABTS /* Updated by TX path during abort procedure */,
/* Updated by TX path during exchange cleanup procedure */
FCOE_TASK_TX_STATE_EXCLEANUP,
/*
* Updated by TX path during exchange cleanup continuation task
* procedure
*/
FCOE_TASK_TX_STATE_EXCLEANUP_TARGET_WRITE_CONT,
/* Updated by TX path during exchange cleanup first xfer procedure */
FCOE_TASK_TX_STATE_EXCLEANUP_TARGET_WRITE,
/* Updated by TX path during exchange cleanup read task in Target */
FCOE_TASK_TX_STATE_EXCLEANUP_TARGET_READ_OR_RSP,
/* Updated by TX path during target exchange cleanup procedure */
FCOE_TASK_TX_STATE_EXCLEANUP_TARGET_WRITE_LAST_CYCLE,
/* Updated by TX path during sequence recovery procedure */
FCOE_TASK_TX_STATE_SEQRECOVERY,
MAX_FCOE_TASK_TX_STATE
};
/*
* FCoE task type
*/
enum fcoe_task_type {
FCOE_TASK_TYPE_WRITE_INITIATOR,
FCOE_TASK_TYPE_READ_INITIATOR,
FCOE_TASK_TYPE_MIDPATH,
FCOE_TASK_TYPE_UNSOLICITED,
FCOE_TASK_TYPE_ABTS,
FCOE_TASK_TYPE_EXCHANGE_CLEANUP,
FCOE_TASK_TYPE_SEQUENCE_CLEANUP,
FCOE_TASK_TYPE_WRITE_TARGET,
FCOE_TASK_TYPE_READ_TARGET,
FCOE_TASK_TYPE_RSP,
FCOE_TASK_TYPE_RSP_SENSE_DATA,
FCOE_TASK_TYPE_ABTS_TARGET,
FCOE_TASK_TYPE_ENUM_SIZE,
MAX_FCOE_TASK_TYPE
};
struct scsi_glbl_queue_entry {
/* Start physical address for the RQ (receive queue) PBL. */
struct regpair rq_pbl_addr;
/* Start physical address for the CQ (completion queue) PBL. */
struct regpair cq_pbl_addr;
/* Start physical address for the CMDQ (command queue) PBL. */
struct regpair cmdq_pbl_addr;
};
#endif /* __QEDF_HSI__ */

2282
drivers/scsi/qedf/qedf_io.c Normal file

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -0,0 +1,15 @@
/*
* QLogic FCoE Offload Driver
* Copyright (c) 2016 Cavium Inc.
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
* this source tree.
*/
#define QEDF_VERSION "8.10.7.0"
#define QEDF_DRIVER_MAJOR_VER 8
#define QEDF_DRIVER_MINOR_VER 10
#define QEDF_DRIVER_REV_VER 7
#define QEDF_DRIVER_ENG_VER 0