net: hns3: fix the VF queue reset flow error

The VF queue reset flow is different from the PF queue reset flow.
The VF driver should stop the VF queue first, then send a message to
the PF, and the PF performs the actual reset. After the PF completes
the queue reset it should send a response to the VF, and only after
receiving that response may the VF initialize the queue hardware.
This patch fixes the VF queue reset flow to follow the correct steps.
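
In rough outline, the fixed flow looks like the following minimal
sketch (vf_stop_queue(), vf_send_mbx_and_wait() and vf_init_queue_hw()
are illustrative placeholders for this description, not symbols from
the driver):

	/* Sketch of the corrected VF queue reset handshake */
	static int vf_queue_reset_sketch(u16 queue_id)
	{
		int ret;

		/* 1. VF stops (disables) its own queue first */
		ret = vf_stop_queue(queue_id);
		if (ret)
			return ret;

		/* 2. VF asks the PF to reset the queue and waits for the
		 * PF's response; the PF does the actual TQP reset
		 */
		ret = vf_send_mbx_and_wait(HCLGE_MBX_QUEUE_RESET, queue_id);
		if (ret)
			return ret;

		/* 3. Only after the PF's response may the VF reinitialize
		 * the queue hardware
		 */
		return vf_init_queue_hw(queue_id);
	}

In the driver, step 1 maps to the new hclgevf_tqp_enable() call in
hclgevf_reset_tqp(), step 2 to hclgevf_send_mbx_msg() with need_resp
set to true, and the PF side of step 2 is the new hclge_reset_vf_queue()
added below.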

Signed-off-by: Peng Li <lipeng321@huawei.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Author: Peng Li <lipeng321@huawei.com>
Date: 2018-03-21 15:49:21 +08:00
Committed-by: David S. Miller
commit 1a426f8b40 (parent dd72140ca9)
4 changed files with 53 additions and 6 deletions


@@ -4926,6 +4926,43 @@ void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
 	}
 }
 
+void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
+{
+	struct hclge_dev *hdev = vport->back;
+	int reset_try_times = 0;
+	int reset_status;
+	u16 queue_gid;
+	int ret;
+
+	queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
+
+	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
+	if (ret) {
+		dev_warn(&hdev->pdev->dev,
+			 "Send reset tqp cmd fail, ret = %d\n", ret);
+		return;
+	}
+
+	reset_try_times = 0;
+	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
+		/* Wait for tqp hw reset */
+		msleep(20);
+		reset_status = hclge_get_reset_status(hdev, queue_gid);
+		if (reset_status)
+			break;
+	}
+
+	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
+		dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
+		return;
+	}
+
+	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
+	if (ret)
+		dev_warn(&hdev->pdev->dev,
+			 "Deassert the soft reset fail, ret = %d\n", ret);
+}
+
 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
 {
 	struct hclge_vport *vport = hclge_get_vport(handle);


@@ -646,5 +646,6 @@ void hclge_rss_indir_init_cfg(struct hclge_dev *hdev);
 void hclge_mbx_handler(struct hclge_dev *hdev);
 void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id);
+void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id);
 int hclge_cfg_flowctrl(struct hclge_dev *hdev);
 #endif


@@ -322,14 +322,17 @@ static int hclge_get_link_info(struct hclge_vport *vport,
 				  HCLGE_MBX_LINK_STAT_CHANGE, dest_vfid);
 }
 
-static void hclge_reset_vf_queue(struct hclge_vport *vport,
-				 struct hclge_mbx_vf_to_pf_cmd *mbx_req)
+static void hclge_mbx_reset_vf_queue(struct hclge_vport *vport,
+				     struct hclge_mbx_vf_to_pf_cmd *mbx_req)
 {
 	u16 queue_id;
 
 	memcpy(&queue_id, &mbx_req->msg[2], sizeof(queue_id));
 
-	hclge_reset_tqp(&vport->nic, queue_id);
+	hclge_reset_vf_queue(vport, queue_id);
+
+	/* send response msg to VF after queue reset complete */
+	hclge_gen_resp_to_vf(vport, mbx_req, 0, NULL, 0);
 }
 
 void hclge_mbx_handler(struct hclge_dev *hdev)
@@ -407,7 +410,7 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
 			ret);
 		break;
 	case HCLGE_MBX_QUEUE_RESET:
-		hclge_reset_vf_queue(vport, req);
+		hclge_mbx_reset_vf_queue(vport, req);
 		break;
 	default:
 		dev_err(&hdev->pdev->dev,


@@ -817,11 +817,17 @@ static void hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
 {
 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
 	u8 msg_data[2];
+	int ret;
 
 	memcpy(&msg_data[0], &queue_id, sizeof(queue_id));
 
-	hclgevf_send_mbx_msg(hdev, HCLGE_MBX_QUEUE_RESET, 0, msg_data, 2, false,
-			     NULL, 0);
+	/* disable vf queue before send queue reset msg to PF */
+	ret = hclgevf_tqp_enable(hdev, queue_id, 0, false);
+	if (ret)
+		return;
+
+	hclgevf_send_mbx_msg(hdev, HCLGE_MBX_QUEUE_RESET, 0, msg_data,
+			     2, true, NULL, 0);
 }
 
 static u32 hclgevf_get_fw_version(struct hnae3_handle *handle)