Date:   Thu, 28 Jul 2022 19:57:44 +0100
From:   <ecree@...inx.com>
To:     <davem@...emloft.net>, <kuba@...nel.org>, <pabeni@...hat.com>,
        <linux-net-drivers@....com>
CC:     <netdev@...r.kernel.org>, Edward Cree <ecree.xilinx@...il.com>
Subject: [PATCH net-next v3 02/10] sfc: ef100 representor RX NAPI poll

From: Edward Cree <ecree.xilinx@...il.com>

Add the 'bottom half' napi->poll routine for representor RX.  See the
next patch (with the top half) for an explanation of the 'fake
interrupt' scheme used to drive this NAPI context.

Signed-off-by: Edward Cree <ecree.xilinx@...il.com>
---
 drivers/net/ethernet/sfc/ef100_rep.c | 64 ++++++++++++++++++++++++++++
 drivers/net/ethernet/sfc/ef100_rep.h | 11 +++++
 2 files changed, 75 insertions(+)
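
For reviewers: below is a minimal sketch of the 'top half' enqueue path
that this poll routine pairs with.  The real implementation arrives in
the next patch of the series, so the function name and the priming rule
here are assumptions for illustration only, not code from this series:

	static void ef100_rep_rx_enqueue_sketch(struct efx_rep *efv,
						struct sk_buff *skb)
	{
		bool primed;

		spin_lock_bh(&efv->rx_lock);
		/* Queue counts as 'primed' when the consumer has caught
		 * up, i.e. NAPI is idle and nothing will poll unless we
		 * schedule it.
		 */
		primed = efv->read_index == efv->write_index;
		list_add_tail(&skb->list, &efv->rx_list);
		efv->write_index++;
		spin_unlock_bh(&efv->rx_lock);
		/* The 'fake interrupt': only the empty->non-empty
		 * transition schedules NAPI; otherwise the running poll
		 * (or its re-schedule check) picks this packet up.
		 */
		if (primed)
			napi_schedule(&efv->napi);
	}

The key point is that the producer fires the fake interrupt only on the
idle->busy transition, which is why the poll routine must re-check
write_index after napi_complete_done() (see the need_resched logic in
the diff below).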

diff --git a/drivers/net/ethernet/sfc/ef100_rep.c b/drivers/net/ethernet/sfc/ef100_rep.c
index 102071ed051b..fe45ae963391 100644
--- a/drivers/net/ethernet/sfc/ef100_rep.c
+++ b/drivers/net/ethernet/sfc/ef100_rep.c
@@ -16,12 +16,16 @@
 
 #define EFX_EF100_REP_DRIVER	"efx_ef100_rep"
 
+static int efx_ef100_rep_poll(struct napi_struct *napi, int weight);
+
 static int efx_ef100_rep_init_struct(struct efx_nic *efx, struct efx_rep *efv,
 				     unsigned int i)
 {
 	efv->parent = efx;
 	efv->idx = i;
 	INIT_LIST_HEAD(&efv->list);
+	INIT_LIST_HEAD(&efv->rx_list);
+	spin_lock_init(&efv->rx_lock);
 	efv->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE |
 			  NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
 			  NETIF_MSG_IFUP | NETIF_MSG_RX_ERR |
@@ -29,6 +33,25 @@ static int efx_ef100_rep_init_struct(struct efx_nic *efx, struct efx_rep *efv,
 	return 0;
 }
 
+static int efx_ef100_rep_open(struct net_device *net_dev)
+{
+	struct efx_rep *efv = netdev_priv(net_dev);
+
+	netif_napi_add(net_dev, &efv->napi, efx_ef100_rep_poll,
+		       NAPI_POLL_WEIGHT);
+	napi_enable(&efv->napi);
+	return 0;
+}
+
+static int efx_ef100_rep_close(struct net_device *net_dev)
+{
+	struct efx_rep *efv = netdev_priv(net_dev);
+
+	napi_disable(&efv->napi);
+	netif_napi_del(&efv->napi);
+	return 0;
+}
+
 static netdev_tx_t efx_ef100_rep_xmit(struct sk_buff *skb,
 				      struct net_device *dev)
 {
@@ -93,6 +116,8 @@ static void efx_ef100_rep_get_stats64(struct net_device *dev,
 }
 
 static const struct net_device_ops efx_ef100_rep_netdev_ops = {
+	.ndo_open		= efx_ef100_rep_open,
+	.ndo_stop		= efx_ef100_rep_close,
 	.ndo_start_xmit		= efx_ef100_rep_xmit,
 	.ndo_get_port_parent_id	= efx_ef100_rep_get_port_parent_id,
 	.ndo_get_phys_port_name	= efx_ef100_rep_get_phys_port_name,
@@ -256,3 +281,42 @@ void efx_ef100_fini_vfreps(struct efx_nic *efx)
 	list_for_each_entry_safe(efv, next, &efx->vf_reps, list)
 		efx_ef100_vfrep_destroy(efx, efv);
 }
+
+static int efx_ef100_rep_poll(struct napi_struct *napi, int weight)
+{
+	struct efx_rep *efv = container_of(napi, struct efx_rep, napi);
+	unsigned int read_index;
+	struct list_head head;
+	struct sk_buff *skb;
+	bool need_resched;
+	int spent = 0;
+
+	INIT_LIST_HEAD(&head);
+	/* Grab up to 'weight' pending SKBs */
+	spin_lock_bh(&efv->rx_lock);
+	read_index = efv->write_index;
+	while (spent < weight && !list_empty(&efv->rx_list)) {
+		skb = list_first_entry(&efv->rx_list, struct sk_buff, list);
+		list_del(&skb->list);
+		list_add_tail(&skb->list, &head);
+		spent++;
+	}
+	spin_unlock_bh(&efv->rx_lock);
+	/* Receive them */
+	netif_receive_skb_list(&head);
+	if (spent < weight)
+		if (napi_complete_done(napi, spent)) {
+			spin_lock_bh(&efv->rx_lock);
+			efv->read_index = read_index;
+			/* If write_index advanced while we were doing the
+			 * RX, then storing our read_index won't re-prime the
+			 * fake-interrupt.  In that case, we need to schedule
+			 * NAPI again to consume the additional packet(s).
+			 */
+			need_resched = efv->write_index != read_index;
+			spin_unlock_bh(&efv->rx_lock);
+			if (need_resched)
+				napi_schedule(&efv->napi);
+		}
+	return spent;
+}
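
To make the need_resched re-schedule above concrete, here is one
interleaving it guards against (assuming the priming rule sketched
earlier, where the top half schedules NAPI only when read_index ==
write_index):

1. poll() takes rx_lock, samples write_index (say 1), and drains one SKB.
2. Before poll() stores read_index, the top half enqueues a second SKB:
   write_index becomes 2, but efv->read_index still reads 0, so
   0 != 1 and no fake interrupt fires.
3. poll() calls napi_complete_done(), stores read_index = 1, sees
   write_index (2) != read_index (1), and re-schedules itself to pick up
   the second SKB.

Without step 3, under the assumed priming rule the producer would never
again observe read_index == write_index, and the second SKB would be
stranded on rx_list indefinitely.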
diff --git a/drivers/net/ethernet/sfc/ef100_rep.h b/drivers/net/ethernet/sfc/ef100_rep.h
index d47fd8ff6220..77037ab22052 100644
--- a/drivers/net/ethernet/sfc/ef100_rep.h
+++ b/drivers/net/ethernet/sfc/ef100_rep.h
@@ -29,7 +29,13 @@ struct efx_rep_sw_stats {
  * @msg_enable: log message enable flags
  * @mport: m-port ID of corresponding VF
  * @idx: VF index
+ * @write_index: number of packets enqueued to @rx_list
+ * @read_index: number of packets consumed from @rx_list
+ * @rx_pring_size: max length of RX list
  * @list: entry on efx->vf_reps
+ * @rx_list: list of SKBs queued for receive in NAPI poll
+ * @rx_lock: protects @rx_list
+ * @napi: NAPI control structure
  * @stats: software traffic counters for netdev stats
  */
 struct efx_rep {
@@ -38,7 +44,12 @@ struct efx_rep {
 	u32 msg_enable;
 	u32 mport;
 	unsigned int idx;
+	unsigned int write_index, read_index;
+	unsigned int rx_pring_size;
 	struct list_head list;
+	struct list_head rx_list;
+	spinlock_t rx_lock;
+	struct napi_struct napi;
 	struct efx_rep_sw_stats stats;
 };
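
Note that @rx_pring_size is added (and documented) here but nothing in
this patch reads it; presumably the top half will use it to bound the
length of @rx_list.  A sketch of what such a producer-side overflow
check might look like (assumed for illustration, not code from this
series):

	/* Under rx_lock, before enqueueing: if the consumer has fallen
	 * more than rx_pring_size packets behind, drop the packet
	 * instead of growing rx_list without bound (sketch only).
	 */
	if (efv->write_index - efv->read_index > efv->rx_pring_size)
		return false;	/* caller frees the skb */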
 
