[<prev] [next>] [thread-next>] [day] [month] [year] [list]
Message-Id: <20230703120116.37444-1-louis.peens@corigine.com>
Date: Mon, 3 Jul 2023 14:01:16 +0200
From: Louis Peens <louis.peens@...igine.com>
To: David Miller <davem@...emloft.net>,
Jakub Kicinski <kuba@...nel.org>,
Paolo Abeni <pabeni@...hat.com>
Cc: Jacob Keller <jacob.e.keller@...el.com>,
Simon Horman <simon.horman@...igine.com>,
Yinjun Zhang <yinjun.zhang@...igine.com>,
netdev@...r.kernel.org,
stable@...r.kernel.org,
oss-drivers@...igine.com
Subject: [PATCH net v2] nfp: clean mc addresses in application firmware when closing port
From: Yinjun Zhang <yinjun.zhang@...igine.com>
When moving devices from one namespace to another, mc addresses are
cleaned in software but not removed from the application firmware. Thus
the mc addresses remain in the firmware and will cause a resource leak.
Now use `__dev_mc_unsync` to clean up mc addresses when closing the port.
Fixes: e20aa071cd95 ("nfp: fix schedule in atomic context when sync mc address")
Cc: stable@...r.kernel.org
Signed-off-by: Yinjun Zhang <yinjun.zhang@...igine.com>
Acked-by: Simon Horman <simon.horman@...igine.com>
Signed-off-by: Louis Peens <louis.peens@...igine.com>
---
Changes since v1:
* Use __dev_mc_unsync to clean mc addresses instead of tracking mc addresses by
the driver itself.
* Clean mc addresses when closing the port instead of when the driver exits,
so that the issue of moving devices between namespaces is also fixed.
* Modify commit message accordingly.
.../ethernet/netronome/nfp/nfp_net_common.c | 171 +++++++++---------
1 file changed, 87 insertions(+), 84 deletions(-)
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 49f2f081ebb5..37b6a034c5d2 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -914,6 +914,90 @@ static void nfp_net_write_mac_addr(struct nfp_net *nn, const u8 *addr)
nn_writew(nn, NFP_NET_CFG_MACADDR + 6, get_unaligned_be16(addr + 4));
}
+int nfp_net_sched_mbox_amsg_work(struct nfp_net *nn, u32 cmd, const void *data, size_t len,
+ int (*cb)(struct nfp_net *, struct nfp_mbox_amsg_entry *))
+{
+ struct nfp_mbox_amsg_entry *entry;
+
+ entry = kmalloc(sizeof(*entry) + len, GFP_ATOMIC);
+ if (!entry)
+ return -ENOMEM;
+
+ memcpy(entry->msg, data, len);
+ entry->cmd = cmd;
+ entry->cfg = cb;
+
+ spin_lock_bh(&nn->mbox_amsg.lock);
+ list_add_tail(&entry->list, &nn->mbox_amsg.list);
+ spin_unlock_bh(&nn->mbox_amsg.lock);
+
+ schedule_work(&nn->mbox_amsg.work);
+
+ return 0;
+}
+
+static void nfp_net_mbox_amsg_work(struct work_struct *work)
+{
+ struct nfp_net *nn = container_of(work, struct nfp_net, mbox_amsg.work);
+ struct nfp_mbox_amsg_entry *entry, *tmp;
+ struct list_head tmp_list;
+
+ INIT_LIST_HEAD(&tmp_list);
+
+ spin_lock_bh(&nn->mbox_amsg.lock);
+ list_splice_init(&nn->mbox_amsg.list, &tmp_list);
+ spin_unlock_bh(&nn->mbox_amsg.lock);
+
+ list_for_each_entry_safe(entry, tmp, &tmp_list, list) {
+ int err = entry->cfg(nn, entry);
+
+ if (err)
+ nn_err(nn, "Config cmd %d to HW failed %d.\n", entry->cmd, err);
+
+ list_del(&entry->list);
+ kfree(entry);
+ }
+}
+
+static int nfp_net_mc_cfg(struct nfp_net *nn, struct nfp_mbox_amsg_entry *entry)
+{
+ unsigned char *addr = entry->msg;
+ int ret;
+
+ ret = nfp_net_mbox_lock(nn, NFP_NET_CFG_MULTICAST_SZ);
+ if (ret)
+ return ret;
+
+ nn_writel(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_MULTICAST_MAC_HI,
+ get_unaligned_be32(addr));
+ nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_MULTICAST_MAC_LO,
+ get_unaligned_be16(addr + 4));
+
+ return nfp_net_mbox_reconfig_and_unlock(nn, entry->cmd);
+}
+
+static int nfp_net_mc_sync(struct net_device *netdev, const unsigned char *addr)
+{
+ struct nfp_net *nn = netdev_priv(netdev);
+
+ if (netdev_mc_count(netdev) > NFP_NET_CFG_MAC_MC_MAX) {
+ nn_err(nn, "Requested number of MC addresses (%d) exceeds maximum (%d).\n",
+ netdev_mc_count(netdev), NFP_NET_CFG_MAC_MC_MAX);
+ return -EINVAL;
+ }
+
+ return nfp_net_sched_mbox_amsg_work(nn, NFP_NET_CFG_MBOX_CMD_MULTICAST_ADD, addr,
+ NFP_NET_CFG_MULTICAST_SZ, nfp_net_mc_cfg);
+}
+
+static int nfp_net_mc_unsync(struct net_device *netdev, const unsigned char *addr)
+{
+ struct nfp_net *nn = netdev_priv(netdev);
+
+ return nfp_net_sched_mbox_amsg_work(nn, NFP_NET_CFG_MBOX_CMD_MULTICAST_DEL, addr,
+ NFP_NET_CFG_MULTICAST_SZ, nfp_net_mc_cfg);
+}
+
/**
* nfp_net_clear_config_and_disable() - Clear control BAR and disable NFP
* @nn: NFP Net device to reconfigure
@@ -1084,6 +1168,9 @@ static int nfp_net_netdev_close(struct net_device *netdev)
/* Step 2: Tell NFP
*/
+ if (nn->cap_w1 & NFP_NET_CFG_CTRL_MCAST_FILTER)
+ __dev_mc_unsync(netdev, nfp_net_mc_unsync);
+
nfp_net_clear_config_and_disable(nn);
nfp_port_configure(netdev, false);
@@ -1335,90 +1422,6 @@ int nfp_ctrl_open(struct nfp_net *nn)
return err;
}
-int nfp_net_sched_mbox_amsg_work(struct nfp_net *nn, u32 cmd, const void *data, size_t len,
- int (*cb)(struct nfp_net *, struct nfp_mbox_amsg_entry *))
-{
- struct nfp_mbox_amsg_entry *entry;
-
- entry = kmalloc(sizeof(*entry) + len, GFP_ATOMIC);
- if (!entry)
- return -ENOMEM;
-
- memcpy(entry->msg, data, len);
- entry->cmd = cmd;
- entry->cfg = cb;
-
- spin_lock_bh(&nn->mbox_amsg.lock);
- list_add_tail(&entry->list, &nn->mbox_amsg.list);
- spin_unlock_bh(&nn->mbox_amsg.lock);
-
- schedule_work(&nn->mbox_amsg.work);
-
- return 0;
-}
-
-static void nfp_net_mbox_amsg_work(struct work_struct *work)
-{
- struct nfp_net *nn = container_of(work, struct nfp_net, mbox_amsg.work);
- struct nfp_mbox_amsg_entry *entry, *tmp;
- struct list_head tmp_list;
-
- INIT_LIST_HEAD(&tmp_list);
-
- spin_lock_bh(&nn->mbox_amsg.lock);
- list_splice_init(&nn->mbox_amsg.list, &tmp_list);
- spin_unlock_bh(&nn->mbox_amsg.lock);
-
- list_for_each_entry_safe(entry, tmp, &tmp_list, list) {
- int err = entry->cfg(nn, entry);
-
- if (err)
- nn_err(nn, "Config cmd %d to HW failed %d.\n", entry->cmd, err);
-
- list_del(&entry->list);
- kfree(entry);
- }
-}
-
-static int nfp_net_mc_cfg(struct nfp_net *nn, struct nfp_mbox_amsg_entry *entry)
-{
- unsigned char *addr = entry->msg;
- int ret;
-
- ret = nfp_net_mbox_lock(nn, NFP_NET_CFG_MULTICAST_SZ);
- if (ret)
- return ret;
-
- nn_writel(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_MULTICAST_MAC_HI,
- get_unaligned_be32(addr));
- nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_MULTICAST_MAC_LO,
- get_unaligned_be16(addr + 4));
-
- return nfp_net_mbox_reconfig_and_unlock(nn, entry->cmd);
-}
-
-static int nfp_net_mc_sync(struct net_device *netdev, const unsigned char *addr)
-{
- struct nfp_net *nn = netdev_priv(netdev);
-
- if (netdev_mc_count(netdev) > NFP_NET_CFG_MAC_MC_MAX) {
- nn_err(nn, "Requested number of MC addresses (%d) exceeds maximum (%d).\n",
- netdev_mc_count(netdev), NFP_NET_CFG_MAC_MC_MAX);
- return -EINVAL;
- }
-
- return nfp_net_sched_mbox_amsg_work(nn, NFP_NET_CFG_MBOX_CMD_MULTICAST_ADD, addr,
- NFP_NET_CFG_MULTICAST_SZ, nfp_net_mc_cfg);
-}
-
-static int nfp_net_mc_unsync(struct net_device *netdev, const unsigned char *addr)
-{
- struct nfp_net *nn = netdev_priv(netdev);
-
- return nfp_net_sched_mbox_amsg_work(nn, NFP_NET_CFG_MBOX_CMD_MULTICAST_DEL, addr,
- NFP_NET_CFG_MULTICAST_SZ, nfp_net_mc_cfg);
-}
-
static void nfp_net_set_rx_mode(struct net_device *netdev)
{
struct nfp_net *nn = netdev_priv(netdev);
--
2.34.1
Powered by blists - more mailing lists