Message-ID: <68ddc5e9191fcb12d1adb666a3e451af6404ec76.1760502478.git.zhuyikai1@h-partners.com>
Date: Wed, 15 Oct 2025 15:15:28 +0800
From: Fan Gong <gongfan1@...wei.com>
To: Fan Gong <gongfan1@...wei.com>, Zhu Yikai <zhuyikai1@...artners.com>
CC: <netdev@...r.kernel.org>, <linux-kernel@...r.kernel.org>, "David S.
Miller" <davem@...emloft.net>, Eric Dumazet <edumazet@...gle.com>, Jakub
Kicinski <kuba@...nel.org>, Paolo Abeni <pabeni@...hat.com>, Simon Horman
<horms@...nel.org>, Andrew Lunn <andrew+netdev@...n.ch>,
<linux-doc@...r.kernel.org>, Jonathan Corbet <corbet@....net>, Bjorn Helgaas
<helgaas@...nel.org>, luosifu <luosifu@...wei.com>, Xin Guo
<guoxin09@...wei.com>, Shen Chenyang <shenchenyang1@...ilicon.com>, Zhou
Shuai <zhoushuai28@...wei.com>, Wu Like <wulike1@...wei.com>, Shi Jing
<shijing34@...wei.com>, Luo Yang <luoyang82@...artners.com>, Meny Yossefi
<meny.yossefi@...wei.com>, Gur Stavi <gur.stavi@...wei.com>, Lee Trager
<lee@...ger.us>, Michael Ellerman <mpe@...erman.id.au>, Vadim Fedorenko
<vadim.fedorenko@...ux.dev>, Suman Ghosh <sumang@...vell.com>, Przemek
Kitszel <przemyslaw.kitszel@...el.com>, Joe Damato <jdamato@...tly.com>,
Christophe JAILLET <christophe.jaillet@...adoo.fr>
Subject: [PATCH net-next v01 2/9] hinic3: Add PF management interfaces
Add the management channel between the PF and the HW management CPU:
synchronous commands whose responses are matched by message id,
reassembly of segmented messages received over the AEQ, and a workqueue
that acknowledges unsolicited management requests with "unsupported"
status.

Also add the MAG_CMD_SET_PORT_ENABLE command so the physical port is
enabled and disabled together with the vport.
Co-developed-by: Zhu Yikai <zhuyikai1@...artners.com>
Signed-off-by: Zhu Yikai <zhuyikai1@...artners.com>
Signed-off-by: Fan Gong <gongfan1@...wei.com>
---
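Notes: below is a minimal sketch of how a service module is expected to
issue a synchronous management command over this channel. The command
struct, function name, and mod/cmd parameters are hypothetical and for
illustration only; the sketch assumes mgmt_msg_params_init_default()
points buf_out at the same buffer, as the hinic3_set_port_enable() usage
in this patch implies. hinic3_set_port_enable() is the first real user
of the pattern.

	/* Hypothetical command layout; real commands define their own
	 * struct with a struct mgmt_msg_head as the first member.
	 */
	struct example_cmd {
		struct mgmt_msg_head head;
		u16 function_id;
		u16 rsvd0;
	};

	static int example_send_sync_cmd(struct hinic3_hwdev *hwdev,
					 u8 mod, u16 cmd)
	{
		struct mgmt_msg_params msg_params = {};
		struct example_cmd ecmd = {};
		int err;

		ecmd.function_id = hinic3_global_func_id(hwdev);

		/* buf_in doubles as buf_out: the mgmt CPU's reply is
		 * written back into the same buffer.
		 */
		mgmt_msg_params_init_default(&msg_params, &ecmd,
					     sizeof(ecmd));

		err = hinic3_send_mbox_to_mgmt(hwdev, mod, cmd, &msg_params);
		if (err || ecmd.head.status) {
			dev_err(hwdev->dev,
				"Failed to send mgmt cmd, err: %d, status: 0x%x\n",
				err, ecmd.head.status);
			return -EIO;
		}

		return 0;
	}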
.../ethernet/huawei/hinic3/hinic3_hw_intf.h | 2 +
.../net/ethernet/huawei/hinic3/hinic3_hwdev.c | 51 ++-
.../net/ethernet/huawei/hinic3/hinic3_hwdev.h | 37 ++-
.../net/ethernet/huawei/hinic3/hinic3_main.c | 2 +
.../net/ethernet/huawei/hinic3/hinic3_mbox.c | 13 +
.../net/ethernet/huawei/hinic3/hinic3_mbox.h | 2 +
.../net/ethernet/huawei/hinic3/hinic3_mgmt.c | 308 +++++++++++++++++-
.../net/ethernet/huawei/hinic3/hinic3_mgmt.h | 53 +++
.../huawei/hinic3/hinic3_mgmt_interface.h | 1 +
.../huawei/hinic3/hinic3_netdev_ops.c | 35 ++
.../ethernet/huawei/hinic3/hinic3_nic_cfg.c | 28 ++
.../ethernet/huawei/hinic3/hinic3_nic_cfg.h | 18 +
.../ethernet/huawei/hinic3/hinic3_nic_dev.h | 2 +
13 files changed, 532 insertions(+), 20 deletions(-)
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hw_intf.h b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_intf.h
index a0422ec0500f..329a9c464ff9 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_hw_intf.h
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_intf.h
@@ -39,6 +39,8 @@ enum mgmt_mod_type {
/* Configuration module */
MGMT_MOD_CFGM = 7,
MGMT_MOD_HILINK = 14,
+ /* hardware max module id */
+ MGMT_MOD_HW_MAX = 20,
};
static inline void mgmt_msg_params_init_default(struct mgmt_msg_params *msg_params,
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.c b/drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.c
index 697b18e6faac..386f56f34866 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.c
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.c
@@ -211,6 +211,36 @@ static int init_ceqs_msix_attr(struct hinic3_hwdev *hwdev)
return 0;
}
+static int hinic3_comm_pf_to_mgmt_init(struct hinic3_hwdev *hwdev)
+{
+ int err;
+
+ if (HINIC3_IS_VF(hwdev))
+ return 0;
+
+ err = hinic3_pf_to_mgmt_init(hwdev);
+ if (err)
+ return err;
+
+ set_bit(HINIC3_HWDEV_MGMT_INITED, &hwdev->func_state);
+
+ return 0;
+}
+
+static void hinic3_comm_pf_to_mgmt_free(struct hinic3_hwdev *hwdev)
+{
+ if (HINIC3_IS_VF(hwdev))
+ return;
+
+ spin_lock_bh(&hwdev->channel_lock);
+ clear_bit(HINIC3_HWDEV_MGMT_INITED, &hwdev->func_state);
+ spin_unlock_bh(&hwdev->channel_lock);
+
+ hinic3_aeq_unregister_cb(hwdev, HINIC3_MSG_FROM_FW);
+
+ hinic3_pf_to_mgmt_free(hwdev);
+}
+
static int init_basic_mgmt_channel(struct hinic3_hwdev *hwdev)
{
int err;
@@ -412,10 +442,14 @@ static int hinic3_init_comm_ch(struct hinic3_hwdev *hwdev)
if (err)
return err;
- err = init_basic_attributes(hwdev);
+ err = hinic3_comm_pf_to_mgmt_init(hwdev);
if (err)
goto err_free_basic_mgmt_ch;
+ err = init_basic_attributes(hwdev);
+ if (err)
+ goto err_free_comm_pf_to_mgmt;
+
err = init_cmdqs_channel(hwdev);
if (err) {
dev_err(hwdev->dev, "Failed to init cmdq channel\n");
@@ -428,6 +462,8 @@ static int hinic3_init_comm_ch(struct hinic3_hwdev *hwdev)
err_clear_func_svc_used_state:
hinic3_set_func_svc_used_state(hwdev, COMM_FUNC_SVC_T_COMM, 0);
+err_free_comm_pf_to_mgmt:
+ hinic3_comm_pf_to_mgmt_free(hwdev);
err_free_basic_mgmt_ch:
free_base_mgmt_channel(hwdev);
@@ -439,6 +475,7 @@ static void hinic3_uninit_comm_ch(struct hinic3_hwdev *hwdev)
hinic3_set_pf_status(hwdev->hwif, HINIC3_PF_STATUS_INIT);
hinic3_free_cmdqs_channel(hwdev);
hinic3_set_func_svc_used_state(hwdev, COMM_FUNC_SVC_T_COMM, 0);
+ hinic3_comm_pf_to_mgmt_free(hwdev);
free_base_mgmt_channel(hwdev);
}
@@ -577,9 +614,21 @@ void hinic3_free_hwdev(struct hinic3_hwdev *hwdev)
void hinic3_set_api_stop(struct hinic3_hwdev *hwdev)
{
+ struct hinic3_recv_msg *recv_resp_msg;
struct hinic3_mbox *mbox;
spin_lock_bh(&hwdev->channel_lock);
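+	/* Wake a PF-to-mgmt sender that is still waiting for a response and
+	 * mark the exchange as timed out, so API stop does not leave it
+	 * blocked.
+	 */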
+ if (HINIC3_IS_PF(hwdev) &&
+ test_bit(HINIC3_HWDEV_MGMT_INITED, &hwdev->func_state)) {
+ recv_resp_msg = &hwdev->pf_to_mgmt->recv_resp_msg_from_mgmt;
+ spin_lock_bh(&hwdev->pf_to_mgmt->sync_event_lock);
+ if (hwdev->pf_to_mgmt->event_flag == COMM_SEND_EVENT_START) {
+ complete(&recv_resp_msg->recv_done);
+ hwdev->pf_to_mgmt->event_flag = COMM_SEND_EVENT_TIMEOUT;
+ }
+ spin_unlock_bh(&hwdev->pf_to_mgmt->sync_event_lock);
+ }
+
if (test_bit(HINIC3_HWDEV_MBOX_INITED, &hwdev->func_state)) {
mbox = hwdev->mbox;
spin_lock(&mbox->mbox_lock);
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.h b/drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.h
index 78cface6ddd7..58c0c0b55097 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.h
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.h
@@ -39,24 +39,25 @@ struct hinic3_pcidev {
};
struct hinic3_hwdev {
- struct hinic3_pcidev *adapter;
- struct pci_dev *pdev;
- struct device *dev;
- int dev_id;
- struct hinic3_hwif *hwif;
- struct hinic3_cfg_mgmt_info *cfg_mgmt;
- struct hinic3_aeqs *aeqs;
- struct hinic3_ceqs *ceqs;
- struct hinic3_mbox *mbox;
- struct hinic3_cmdqs *cmdqs;
- struct delayed_work sync_time_task;
- struct workqueue_struct *workq;
- /* protect channel init and uninit */
- spinlock_t channel_lock;
- u64 features[COMM_MAX_FEATURE_QWORD];
- u32 wq_page_size;
- u8 max_cmdq;
- ulong func_state;
+ struct hinic3_pcidev *adapter;
+ struct pci_dev *pdev;
+ struct device *dev;
+ int dev_id;
+ struct hinic3_hwif *hwif;
+ struct hinic3_cfg_mgmt_info *cfg_mgmt;
+ struct hinic3_aeqs *aeqs;
+ struct hinic3_ceqs *ceqs;
+ struct hinic3_mbox *mbox;
+ struct hinic3_msg_pf_to_mgmt *pf_to_mgmt;
+ struct hinic3_cmdqs *cmdqs;
+ struct delayed_work sync_time_task;
+ struct workqueue_struct *workq;
+ /* protect hwdev channel init and uninit */
+ spinlock_t channel_lock;
+ u64 features[COMM_MAX_FEATURE_QWORD];
+ u32 wq_page_size;
+ u8 max_cmdq;
+ ulong func_state;
};
struct hinic3_event_info {
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_main.c b/drivers/net/ethernet/huawei/hinic3/hinic3_main.c
index cecc59e9c536..5ac030c88085 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_main.c
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_main.c
@@ -133,6 +133,8 @@ static int hinic3_sw_init(struct net_device *netdev)
u8 mac_addr[ETH_ALEN];
int err;
+ sema_init(&nic_dev->port_state_sem, 1);
+
nic_dev->q_params.sq_depth = HINIC3_SQ_DEPTH;
nic_dev->q_params.rq_depth = HINIC3_RQ_DEPTH;
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_mbox.c b/drivers/net/ethernet/huawei/hinic3/hinic3_mbox.c
index b4e151e88a13..a4be5b2984cf 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_mbox.c
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_mbox.c
@@ -870,6 +870,19 @@ int hinic3_send_mbox_to_mgmt(struct hinic3_hwdev *hwdev, u8 mod, u16 cmd,
return err;
}
+void hinic3_response_mbox_to_mgmt(struct hinic3_hwdev *hwdev, u8 mod, u16 cmd,
+ const void *buf_in, u32 in_size, u16 msg_id)
+{
+ struct mbox_msg_info msg_info;
+
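+	/* Echo the request's msg_id so the mgmt CPU can match this ACK to
+	 * its original request.
+	 */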
+ msg_info.msg_id = (u8)msg_id;
+ msg_info.status = 0;
+
+ send_mbox_msg(hwdev->mbox, mod, cmd, buf_in, in_size,
+ MBOX_MGMT_FUNC_ID, MBOX_MSG_RESP,
+ MBOX_MSG_NO_ACK, &msg_info);
+}
+
int hinic3_send_mbox_to_mgmt_no_ack(struct hinic3_hwdev *hwdev, u8 mod, u16 cmd,
const struct mgmt_msg_params *msg_params)
{
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_mbox.h b/drivers/net/ethernet/huawei/hinic3/hinic3_mbox.h
index e71629e95086..e26f22d1d564 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_mbox.h
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_mbox.h
@@ -135,6 +135,8 @@ void hinic3_free_mbox(struct hinic3_hwdev *hwdev);
int hinic3_send_mbox_to_mgmt(struct hinic3_hwdev *hwdev, u8 mod, u16 cmd,
const struct mgmt_msg_params *msg_params);
+void hinic3_response_mbox_to_mgmt(struct hinic3_hwdev *hwdev, u8 mod, u16 cmd,
+ const void *buf_in, u32 in_size, u16 msg_id);
int hinic3_send_mbox_to_mgmt_no_ack(struct hinic3_hwdev *hwdev, u8 mod, u16 cmd,
const struct mgmt_msg_params *msg_params);
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.c b/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.c
index c38d10cd7fac..970ee0528770 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.c
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.c
@@ -3,19 +3,325 @@
#include "hinic3_eqs.h"
#include "hinic3_hwdev.h"
+#include "hinic3_hwif.h"
#include "hinic3_mbox.h"
#include "hinic3_mgmt.h"
+#define HINIC3_MSG_TO_MGMT_MAX_LEN 2016
+
+#define MGMT_MAX_PF_BUF_SIZE 2048UL
+#define MGMT_SEG_LEN_MAX 48
+#define MGMT_ASYNC_MSG_FLAG 0x8
+
+#define HINIC3_MGMT_WQ_NAME "hinic3_mgmt"
+
+/* Bogus sequence ID that prevents an accidental match after a partial
+ * message: valid seq_ids run 0..MGMT_MAX_PF_BUF_SIZE / MGMT_SEG_LEN_MAX
+ * (0..42 for 2048-byte messages in 48-byte segments), so 43 never matches.
+ */
+#define MGMT_BOGUS_SEQ_ID \
+	(MGMT_MAX_PF_BUF_SIZE / MGMT_SEG_LEN_MAX + 1)
+
+static void mgmt_resp_msg_handler(struct hinic3_msg_pf_to_mgmt *pf_to_mgmt,
+ struct hinic3_recv_msg *recv_msg)
+{
+ struct device *dev = pf_to_mgmt->hwdev->dev;
+
+ /* Ignore async msg */
+ if (recv_msg->msg_id & MGMT_ASYNC_MSG_FLAG)
+ return;
+
+ spin_lock(&pf_to_mgmt->sync_event_lock);
+ if (recv_msg->msg_id != pf_to_mgmt->sync_msg_id) {
+ dev_err(dev, "msg id mismatch, send msg id: 0x%x, recv msg id: 0x%x, event state: %d\n",
+ pf_to_mgmt->sync_msg_id, recv_msg->msg_id,
+ pf_to_mgmt->event_flag);
+ } else if (pf_to_mgmt->event_flag == COMM_SEND_EVENT_START) {
+ pf_to_mgmt->event_flag = COMM_SEND_EVENT_SUCCESS;
+ complete(&recv_msg->recv_done);
+ } else {
+ dev_err(dev, "Wait timeout, send msg id: 0x%x, recv msg id: 0x%x, event state: %d\n",
+ pf_to_mgmt->sync_msg_id, recv_msg->msg_id,
+ pf_to_mgmt->event_flag);
+ }
+ spin_unlock(&pf_to_mgmt->sync_event_lock);
+}
+
+static void recv_mgmt_msg_work_handler(struct work_struct *work)
+{
+ struct hinic3_msg_pf_to_mgmt *pf_to_mgmt;
+ struct mgmt_msg_handle_work *mgmt_work;
+ struct mgmt_msg_head *ack_cmd;
+
+ mgmt_work = container_of(work, struct mgmt_msg_handle_work, work);
+
+	/* At the moment, we do not expect any meaningful messages, but if
+	 * the sender expects an ACK we still need to provide one with
+	 * "unsupported" status.
+	 */
+ if (mgmt_work->async_mgmt_to_pf)
+ goto out;
+
+ pf_to_mgmt = mgmt_work->pf_to_mgmt;
+ ack_cmd = pf_to_mgmt->mgmt_ack_buf;
+ memset(ack_cmd, 0, sizeof(*ack_cmd));
+ ack_cmd->status = MGMT_STATUS_CMD_UNSUPPORTED;
+
+ hinic3_response_mbox_to_mgmt(pf_to_mgmt->hwdev, mgmt_work->mod,
+ mgmt_work->cmd, ack_cmd, sizeof(*ack_cmd),
+ mgmt_work->msg_id);
+
+out:
+ kfree(mgmt_work->msg);
+ kfree(mgmt_work);
+}
+
+static int recv_msg_add_seg(struct hinic3_recv_msg *recv_msg,
+ __le64 msg_header, const void *seg_data,
+ bool *is_complete)
+{
+ u8 seq_id, msg_id, seg_len, is_last;
+ char *msg_buff;
+ u32 offset;
+
+ seg_len = MBOX_MSG_HEADER_GET(msg_header, SEG_LEN);
+ is_last = MBOX_MSG_HEADER_GET(msg_header, LAST);
+ seq_id = MBOX_MSG_HEADER_GET(msg_header, SEQID);
+ msg_id = MBOX_MSG_HEADER_GET(msg_header, MSG_ID);
+
+ if (seg_len > MGMT_SEG_LEN_MAX)
+ return -EINVAL;
+
+ /* All segments but last must be of maximal size */
+ if (seg_len != MGMT_SEG_LEN_MAX && !is_last)
+ return -EINVAL;
+
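+	/* seq_id 0 starts a new message; every later segment must be the
+	 * direct successor of the previous one and carry the same msg_id.
+	 */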
+ if (seq_id == 0) {
+ recv_msg->seq_id = seq_id;
+ recv_msg->msg_id = msg_id;
+ } else if (seq_id != recv_msg->seq_id + 1 ||
+ msg_id != recv_msg->msg_id) {
+ return -EINVAL;
+ }
+
+ offset = seq_id * MGMT_SEG_LEN_MAX;
+ if (offset + seg_len > MGMT_MAX_PF_BUF_SIZE)
+ return -EINVAL;
+
+ msg_buff = recv_msg->msg;
+ memcpy(msg_buff + offset, seg_data, seg_len);
+ recv_msg->msg_len = offset + seg_len;
+ recv_msg->seq_id = seq_id;
+ *is_complete = !!is_last;
+
+ return 0;
+}
+
+static void init_mgmt_msg_work(struct hinic3_msg_pf_to_mgmt *pf_to_mgmt,
+ struct hinic3_recv_msg *recv_msg)
+{
+ struct mgmt_msg_handle_work *mgmt_work;
+
+	mgmt_work = kzalloc(sizeof(*mgmt_work), GFP_KERNEL);
+	if (!mgmt_work)
+		return;
+
+	/* For an empty message, mgmt_work->msg stays NULL so the work
+	 * handler can safely kfree() it.
+	 */
+	if (recv_msg->msg_len) {
+		mgmt_work->msg = kmalloc(recv_msg->msg_len, GFP_KERNEL);
+		if (!mgmt_work->msg) {
+			kfree(mgmt_work);
+			return;
+		}
+		memcpy(mgmt_work->msg, recv_msg->msg, recv_msg->msg_len);
+	}
+
+	mgmt_work->pf_to_mgmt = pf_to_mgmt;
+	mgmt_work->msg_len = recv_msg->msg_len;
+ mgmt_work->msg_id = recv_msg->msg_id;
+ mgmt_work->mod = recv_msg->mod;
+ mgmt_work->cmd = recv_msg->cmd;
+ mgmt_work->async_mgmt_to_pf = recv_msg->async_mgmt_to_pf;
+
+ INIT_WORK(&mgmt_work->work, recv_mgmt_msg_work_handler);
+ queue_work(pf_to_mgmt->workq, &mgmt_work->work);
+}
+
+static void recv_mgmt_msg_handler(struct hinic3_msg_pf_to_mgmt *pf_to_mgmt,
+ const u8 *header,
+ struct hinic3_recv_msg *recv_msg)
+{
+ struct hinic3_hwdev *hwdev = pf_to_mgmt->hwdev;
+ const void *seg_data;
+ __le64 msg_header;
+ bool is_complete;
+ u8 dir, msg_id;
+ int err;
+
+ msg_header = *(__force __le64 *)header;
+ dir = MBOX_MSG_HEADER_GET(msg_header, DIRECTION);
+ msg_id = MBOX_MSG_HEADER_GET(msg_header, MSG_ID);
+	/* Async responses are not awaited by any sender; nothing to do */
+ if (dir == MBOX_MSG_RESP && (msg_id & MGMT_ASYNC_MSG_FLAG))
+ return;
+
+ seg_data = header + sizeof(msg_header);
+ err = recv_msg_add_seg(recv_msg, msg_header, seg_data, &is_complete);
+ if (err) {
+ dev_err(hwdev->dev, "invalid receive segment\n");
+ /* set seq_id to invalid seq_id */
+ recv_msg->seq_id = MGMT_BOGUS_SEQ_ID;
+
+ return;
+ }
+
+ if (!is_complete)
+ return;
+
+ recv_msg->cmd = MBOX_MSG_HEADER_GET(msg_header, CMD);
+ recv_msg->mod = MBOX_MSG_HEADER_GET(msg_header, MODULE);
+ recv_msg->async_mgmt_to_pf = MBOX_MSG_HEADER_GET(msg_header, NO_ACK);
+ recv_msg->seq_id = MGMT_BOGUS_SEQ_ID;
+
+ if (dir == MBOX_MSG_RESP)
+ mgmt_resp_msg_handler(pf_to_mgmt, recv_msg);
+ else
+ init_mgmt_msg_work(pf_to_mgmt, recv_msg);
+}
+
+static int alloc_recv_msg(struct hinic3_recv_msg *recv_msg)
+{
+ recv_msg->seq_id = MGMT_BOGUS_SEQ_ID;
+
+ recv_msg->msg = kzalloc(MGMT_MAX_PF_BUF_SIZE, GFP_KERNEL);
+ if (!recv_msg->msg)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void free_recv_msg(struct hinic3_recv_msg *recv_msg)
+{
+ kfree(recv_msg->msg);
+}
+
+static int alloc_msg_buf(struct hinic3_msg_pf_to_mgmt *pf_to_mgmt)
+{
+ struct device *dev = pf_to_mgmt->hwdev->dev;
+ int err;
+
+ err = alloc_recv_msg(&pf_to_mgmt->recv_msg_from_mgmt);
+ if (err) {
+ dev_err(dev, "Failed to allocate recv msg\n");
+ return err;
+ }
+
+ err = alloc_recv_msg(&pf_to_mgmt->recv_resp_msg_from_mgmt);
+ if (err) {
+ dev_err(dev, "Failed to allocate resp recv msg\n");
+ goto err_free_msg_from_mgmt;
+ }
+
+ pf_to_mgmt->mgmt_ack_buf = kzalloc(MGMT_MAX_PF_BUF_SIZE, GFP_KERNEL);
+ if (!pf_to_mgmt->mgmt_ack_buf) {
+ err = -ENOMEM;
+ goto err_free_resp_msg_from_mgmt;
+ }
+
+ return 0;
+
+err_free_resp_msg_from_mgmt:
+ free_recv_msg(&pf_to_mgmt->recv_resp_msg_from_mgmt);
+err_free_msg_from_mgmt:
+ free_recv_msg(&pf_to_mgmt->recv_msg_from_mgmt);
+
+ return err;
+}
+
+static void free_msg_buf(struct hinic3_msg_pf_to_mgmt *pf_to_mgmt)
+{
+ kfree(pf_to_mgmt->mgmt_ack_buf);
+
+ free_recv_msg(&pf_to_mgmt->recv_resp_msg_from_mgmt);
+ free_recv_msg(&pf_to_mgmt->recv_msg_from_mgmt);
+}
+
+int hinic3_pf_to_mgmt_init(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_msg_pf_to_mgmt *pf_to_mgmt;
+ int err;
+
+ pf_to_mgmt = kzalloc(sizeof(*pf_to_mgmt), GFP_KERNEL);
+ if (!pf_to_mgmt)
+ return -ENOMEM;
+
+ hwdev->pf_to_mgmt = pf_to_mgmt;
+ pf_to_mgmt->hwdev = hwdev;
+ spin_lock_init(&pf_to_mgmt->sync_event_lock);
+ pf_to_mgmt->workq = create_singlethread_workqueue(HINIC3_MGMT_WQ_NAME);
+ if (!pf_to_mgmt->workq) {
+ dev_err(hwdev->dev, "Failed to initialize MGMT workqueue\n");
+ err = -ENOMEM;
+ goto err_free_pf_to_mgmt;
+ }
+
+ err = alloc_msg_buf(pf_to_mgmt);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to allocate msg buffers\n");
+ goto err_destroy_workqueue;
+ }
+
+ return 0;
+
+err_destroy_workqueue:
+ destroy_workqueue(pf_to_mgmt->workq);
+err_free_pf_to_mgmt:
+ kfree(pf_to_mgmt);
+
+ return err;
+}
+
+void hinic3_pf_to_mgmt_free(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_msg_pf_to_mgmt *pf_to_mgmt = hwdev->pf_to_mgmt;
+
+	/* Destroy the workqueue before freeing the other pf_to_mgmt
+	 * resources so queued work cannot touch freed memory.
+	 */
+ destroy_workqueue(pf_to_mgmt->workq);
+
+ free_msg_buf(pf_to_mgmt);
+ kfree(pf_to_mgmt);
+}
+
void hinic3_flush_mgmt_workq(struct hinic3_hwdev *hwdev)
{
if (hwdev->aeqs)
flush_workqueue(hwdev->aeqs->workq);
+
+ if (HINIC3_IS_PF(hwdev) && hwdev->pf_to_mgmt)
+ flush_workqueue(hwdev->pf_to_mgmt->workq);
}
void hinic3_mgmt_msg_aeqe_handler(struct hinic3_hwdev *hwdev, u8 *header,
u8 size)
{
+ struct hinic3_msg_pf_to_mgmt *pf_to_mgmt;
+ struct hinic3_recv_msg *recv_msg;
+ __le64 msg_header;
+ bool is_send_dir;
+
if (MBOX_MSG_HEADER_GET(*(__force __le64 *)header, SOURCE) ==
- MBOX_MSG_FROM_MBOX)
+	    MBOX_MSG_FROM_MBOX) {
hinic3_mbox_func_aeqe_handler(hwdev, header, size);
+
+ return;
+ }
+
+ pf_to_mgmt = hwdev->pf_to_mgmt;
+ msg_header = *(__force __le64 *)header;
+
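+	/* SEND direction carries an unsolicited request from the mgmt CPU;
+	 * RESP carries the reply to a command this PF sent. Each direction
+	 * accumulates into its own receive buffer.
+	 */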
+	is_send_dir = MBOX_MSG_HEADER_GET(msg_header, DIRECTION) ==
+		      MBOX_MSG_SEND;
+
+ recv_msg = is_send_dir ? &pf_to_mgmt->recv_msg_from_mgmt :
+ &pf_to_mgmt->recv_resp_msg_from_mgmt;
+
+ recv_mgmt_msg_handler(pf_to_mgmt, header, recv_msg);
}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.h b/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.h
index bbef3b32a6ec..56f48d5442bc 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.h
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.h
@@ -6,8 +6,61 @@
#include <linux/types.h>
+#include "hinic3_mbox.h"
+#include "hinic3_hw_intf.h"
+
struct hinic3_hwdev;
+struct hinic3_recv_msg {
+	/* Preallocated buffer of size MGMT_MAX_PF_BUF_SIZE that accumulates
+	 * the received message, segment by segment.
+	 */
+ void *msg;
+ /* Message id for which segments are accumulated. */
+ u8 msg_id;
+ /* Sequence id of last received segment of current message. */
+ u8 seq_id;
+ u16 msg_len;
+ int async_mgmt_to_pf;
+ enum mgmt_mod_type mod;
+ u16 cmd;
+ struct completion recv_done;
+};
+
+enum comm_pf_to_mgmt_event_state {
+ COMM_SEND_EVENT_UNINIT,
+ COMM_SEND_EVENT_START,
+ COMM_SEND_EVENT_SUCCESS,
+ COMM_SEND_EVENT_TIMEOUT,
+};
+
+struct hinic3_msg_pf_to_mgmt {
+ struct hinic3_hwdev *hwdev;
+ struct workqueue_struct *workq;
+ void *mgmt_ack_buf;
+ struct hinic3_recv_msg recv_msg_from_mgmt;
+ struct hinic3_recv_msg recv_resp_msg_from_mgmt;
+ u16 async_msg_id;
+ u16 sync_msg_id;
+ void *async_msg_cb_data[MGMT_MOD_HW_MAX];
+ /* synchronizes message send with message receives via event queue */
+ spinlock_t sync_event_lock;
+ enum comm_pf_to_mgmt_event_state event_flag;
+};
+
+struct mgmt_msg_handle_work {
+ struct work_struct work;
+ struct hinic3_msg_pf_to_mgmt *pf_to_mgmt;
+ void *msg;
+ u16 msg_len;
+ enum mgmt_mod_type mod;
+ u16 cmd;
+ u16 msg_id;
+ int async_mgmt_to_pf;
+};
+
+int hinic3_pf_to_mgmt_init(struct hinic3_hwdev *hwdev);
+void hinic3_pf_to_mgmt_free(struct hinic3_hwdev *hwdev);
void hinic3_flush_mgmt_workq(struct hinic3_hwdev *hwdev);
void hinic3_mgmt_msg_aeqe_handler(struct hinic3_hwdev *hwdev,
u8 *header, u8 size);
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt_interface.h b/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt_interface.h
index f9a3222b1b46..3a6d3ee534d0 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt_interface.h
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt_interface.h
@@ -190,6 +190,7 @@ enum l2nic_ucode_cmd {
/* hilink mac group command */
enum mag_cmd {
+ MAG_CMD_SET_PORT_ENABLE = 6,
MAG_CMD_GET_LINK_STATUS = 7,
};
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_netdev_ops.c b/drivers/net/ethernet/huawei/hinic3/hinic3_netdev_ops.c
index 0fa3c7900225..ebac18543a1c 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_netdev_ops.c
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_netdev_ops.c
@@ -327,6 +327,31 @@ static void hinic3_close_channel(struct net_device *netdev)
hinic3_free_qp_ctxts(nic_dev);
}
+static int hinic3_maybe_set_port_state(struct net_device *netdev, bool enable)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ int err;
+
+ down(&nic_dev->port_state_sem);
+ err = hinic3_set_port_enable(nic_dev->hwdev, enable);
+ up(&nic_dev->port_state_sem);
+
+ return err;
+}
+
+static void hinic3_print_link_message(struct net_device *netdev,
+ bool link_status_up)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+
+ if (nic_dev->link_status_up == link_status_up)
+ return;
+
+ nic_dev->link_status_up = link_status_up;
+
+	netdev_dbg(netdev, "Link is %s\n", link_status_up ? "up" : "down");
+}
+
static int hinic3_vport_up(struct net_device *netdev)
{
struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
@@ -341,6 +366,12 @@ static int hinic3_vport_up(struct net_device *netdev)
goto err_flush_qps_res;
}
+ err = hinic3_maybe_set_port_state(netdev, true);
+ if (err) {
+ netdev_err(netdev, "Failed to enable port\n");
+ goto err_disable_vport;
+ }
+
err = netif_set_real_num_queues(netdev, nic_dev->q_params.num_qps,
nic_dev->q_params.num_qps);
if (err) {
@@ -353,8 +384,12 @@ static int hinic3_vport_up(struct net_device *netdev)
if (!err && link_status_up)
netif_carrier_on(netdev);
+ hinic3_print_link_message(netdev, link_status_up);
+
return 0;
+err_disable_vport:
+ hinic3_set_vport_enable(nic_dev->hwdev, glb_func_id, false);
err_flush_qps_res:
hinic3_flush_qps_res(nic_dev->hwdev);
/* wait to guarantee that no packets will be sent to host */
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.c b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.c
index 2b93026845ff..1ed266d5d40c 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.c
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.c
@@ -362,6 +362,34 @@ int hinic3_force_drop_tx_pkt(struct hinic3_hwdev *hwdev)
return pkt_drop.msg_head.status;
}
+int hinic3_set_port_enable(struct hinic3_hwdev *hwdev, bool enable)
+{
+ struct mag_cmd_set_port_enable en_state = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ if (HINIC3_IS_VF(hwdev))
+ return 0;
+
+ en_state.function_id = hinic3_global_func_id(hwdev);
+ en_state.state = enable ? MAG_CMD_TX_ENABLE | MAG_CMD_RX_ENABLE :
+ MAG_CMD_PORT_DISABLE;
+
+ mgmt_msg_params_init_default(&msg_params, &en_state,
+ sizeof(en_state));
+
+	err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_HILINK,
+				       MAG_CMD_SET_PORT_ENABLE, &msg_params);
+	if (err || en_state.head.status) {
+ dev_err(hwdev->dev, "Failed to set port state, err: %d, status: 0x%x\n",
+ err, en_state.head.status);
+ return -EIO;
+ }
+
+ return 0;
+}
+
int hinic3_sync_dcb_state(struct hinic3_hwdev *hwdev, u8 op_code, u8 state)
{
struct l2nic_cmd_set_dcb_state dcb_state = {};
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.h b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.h
index 08bf14679bf8..d4326937db48 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.h
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.h
@@ -34,6 +34,23 @@ struct hinic3_sq_attr {
u64 ci_dma_base;
};
+#define MAG_CMD_PORT_DISABLE 0x0
+#define MAG_CMD_TX_ENABLE 0x1
+#define MAG_CMD_RX_ENABLE 0x2
+/* The physical port is disabled only when all PFs on the port are set
+ * to down; if any PF is enabled, the port stays enabled.
+ */
+struct mag_cmd_set_port_enable {
+ struct mgmt_msg_head head;
+
+ u16 function_id;
+ u16 rsvd0;
+
+ /* bitmap bit0:tx_en bit1:rx_en */
+ u8 state;
+ u8 rsvd1[3];
+};
+
int hinic3_get_nic_feature_from_hw(struct hinic3_nic_dev *nic_dev);
int hinic3_set_nic_feature_to_hw(struct hinic3_nic_dev *nic_dev);
bool hinic3_test_support(struct hinic3_nic_dev *nic_dev,
@@ -57,6 +74,7 @@ int hinic3_flush_qps_res(struct hinic3_hwdev *hwdev);
int hinic3_force_drop_tx_pkt(struct hinic3_hwdev *hwdev);
int hinic3_sync_dcb_state(struct hinic3_hwdev *hwdev, u8 op_code, u8 state);
+int hinic3_set_port_enable(struct hinic3_hwdev *hwdev, bool enable);
int hinic3_get_link_status(struct hinic3_hwdev *hwdev, bool *link_status_up);
int hinic3_set_vport_enable(struct hinic3_hwdev *hwdev, u16 func_id,
bool enable);
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_dev.h b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_dev.h
index 5ba83261616c..3a9f3ccdb684 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_dev.h
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_dev.h
@@ -83,6 +83,8 @@ struct hinic3_nic_dev {
struct hinic3_intr_coal_info *intr_coalesce;
+ struct semaphore port_state_sem;
+
bool link_status_up;
};
--
2.43.0