[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20230206100837.451300-4-vladimir.oltean@nxp.com>
Date: Mon, 6 Feb 2023 12:08:29 +0200
From: Vladimir Oltean <vladimir.oltean@....com>
To: netdev@...r.kernel.org
Cc: "David S. Miller" <davem@...emloft.net>,
Eric Dumazet <edumazet@...gle.com>,
Jakub Kicinski <kuba@...nel.org>,
Paolo Abeni <pabeni@...hat.com>,
Claudiu Manoil <claudiu.manoil@....com>,
Björn Töpel <bjorn@...nel.org>,
Magnus Karlsson <magnus.karlsson@...el.com>,
Maciej Fijalkowski <maciej.fijalkowski@...el.com>,
Alexei Starovoitov <ast@...nel.org>,
Daniel Borkmann <daniel@...earbox.net>,
Jesper Dangaard Brouer <hawk@...nel.org>,
John Fastabend <john.fastabend@...il.com>, bpf@...r.kernel.org,
linux-kernel@...r.kernel.org
Subject: [RFC PATCH net-next 03/11] net: enetc: rename "cleaned_cnt" to "buffs_missing"
Calling enetc_bd_unused() on an RX ring returns the number of additional
descriptors needed for the ring to be completely filled with
hardware-owned descriptors (i.e. buffers into which hardware can put packets).
Storing this value in a variable named "cleaned_cnt" is misleading,
especially since the NAPI poll routine (enetc_clean_rx_ring) may start
with a non-zero cleaned_cnt before anything has actually been cleaned.
Signed-off-by: Vladimir Oltean <vladimir.oltean@....com>
---
drivers/net/ethernet/freescale/enetc/enetc.c | 41 ++++++++++----------
1 file changed, 21 insertions(+), 20 deletions(-)
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index 2d8f79ddb78f..4a81a23539fb 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -1145,7 +1145,8 @@ static bool enetc_check_bd_errors_and_consume(struct enetc_bdr *rx_ring,
static struct sk_buff *enetc_build_skb(struct enetc_bdr *rx_ring,
u32 bd_status, union enetc_rx_bd **rxbd,
- int *i, int *cleaned_cnt, int buffer_size)
+ int *i, int *buffs_missing,
+ int buffer_size)
{
struct sk_buff *skb;
u16 size;
@@ -1157,7 +1158,7 @@ static struct sk_buff *enetc_build_skb(struct enetc_bdr *rx_ring,
enetc_get_offloads(rx_ring, *rxbd, skb);
- (*cleaned_cnt)++;
+ (*buffs_missing)++;
enetc_rxbd_next(rx_ring, rxbd, i);
@@ -1173,7 +1174,7 @@ static struct sk_buff *enetc_build_skb(struct enetc_bdr *rx_ring,
enetc_add_rx_buff_to_skb(rx_ring, *i, size, skb);
- (*cleaned_cnt)++;
+ (*buffs_missing)++;
enetc_rxbd_next(rx_ring, rxbd, i);
}
@@ -1190,9 +1191,9 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
struct napi_struct *napi, int work_limit)
{
int rx_frm_cnt = 0, rx_byte_cnt = 0;
- int cleaned_cnt, i;
+ int buffs_missing, i;
- cleaned_cnt = enetc_bd_unused(rx_ring);
+ buffs_missing = enetc_bd_unused(rx_ring);
/* next descriptor to process */
i = rx_ring->next_to_clean;
@@ -1201,9 +1202,9 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
struct sk_buff *skb;
u32 bd_status;
- if (cleaned_cnt >= ENETC_RXBD_BUNDLE)
- cleaned_cnt -= enetc_refill_rx_ring(rx_ring,
- cleaned_cnt);
+ if (buffs_missing >= ENETC_RXBD_BUNDLE)
+ buffs_missing -= enetc_refill_rx_ring(rx_ring,
+ buffs_missing);
rxbd = enetc_rxbd(rx_ring, i);
bd_status = le32_to_cpu(rxbd->r.lstatus);
@@ -1218,7 +1219,7 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
break;
skb = enetc_build_skb(rx_ring, bd_status, &rxbd, &i,
- &cleaned_cnt, ENETC_RXB_DMA_SIZE);
+ &buffs_missing, ENETC_RXB_DMA_SIZE);
if (!skb)
break;
@@ -1447,14 +1448,14 @@ static void enetc_add_rx_buff_to_xdp(struct enetc_bdr *rx_ring, int i,
static void enetc_build_xdp_buff(struct enetc_bdr *rx_ring, u32 bd_status,
union enetc_rx_bd **rxbd, int *i,
- int *cleaned_cnt, struct xdp_buff *xdp_buff)
+ int *buffs_missing, struct xdp_buff *xdp_buff)
{
u16 size = le16_to_cpu((*rxbd)->r.buf_len);
xdp_init_buff(xdp_buff, ENETC_RXB_TRUESIZE, &rx_ring->xdp.rxq);
enetc_map_rx_buff_to_xdp(rx_ring, *i, xdp_buff, size);
- (*cleaned_cnt)++;
+ (*buffs_missing)++;
enetc_rxbd_next(rx_ring, rxbd, i);
/* not last BD in frame? */
@@ -1468,7 +1469,7 @@ static void enetc_build_xdp_buff(struct enetc_bdr *rx_ring, u32 bd_status,
}
enetc_add_rx_buff_to_xdp(rx_ring, *i, size, xdp_buff);
- (*cleaned_cnt)++;
+ (*buffs_missing)++;
enetc_rxbd_next(rx_ring, rxbd, i);
}
}
@@ -1524,16 +1525,16 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
struct enetc_ndev_priv *priv = netdev_priv(rx_ring->ndev);
int rx_frm_cnt = 0, rx_byte_cnt = 0;
struct enetc_bdr *tx_ring;
- int cleaned_cnt, i;
+ int buffs_missing, i;
u32 xdp_act;
- cleaned_cnt = enetc_bd_unused(rx_ring);
+ buffs_missing = enetc_bd_unused(rx_ring);
/* next descriptor to process */
i = rx_ring->next_to_clean;
while (likely(rx_frm_cnt < work_limit)) {
union enetc_rx_bd *rxbd, *orig_rxbd;
- int orig_i, orig_cleaned_cnt;
+ int orig_i, orig_buffs_missing;
struct xdp_buff xdp_buff;
struct sk_buff *skb;
u32 bd_status;
@@ -1552,11 +1553,11 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
break;
orig_rxbd = rxbd;
- orig_cleaned_cnt = cleaned_cnt;
+ orig_buffs_missing = buffs_missing;
orig_i = i;
enetc_build_xdp_buff(rx_ring, bd_status, &rxbd, &i,
- &cleaned_cnt, &xdp_buff);
+ &buffs_missing, &xdp_buff);
xdp_act = bpf_prog_run_xdp(prog, &xdp_buff);
@@ -1572,11 +1573,11 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
break;
case XDP_PASS:
rxbd = orig_rxbd;
- cleaned_cnt = orig_cleaned_cnt;
+ buffs_missing = orig_buffs_missing;
i = orig_i;
skb = enetc_build_skb(rx_ring, bd_status, &rxbd,
- &i, &cleaned_cnt,
+ &i, &buffs_missing,
ENETC_RXB_DMA_SIZE_XDP);
if (unlikely(!skb))
goto out;
@@ -1640,7 +1641,7 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
if (xdp_tx_frm_cnt)
enetc_update_tx_ring_tail(tx_ring);
- if (cleaned_cnt > rx_ring->xdp.xdp_tx_in_flight)
+ if (buffs_missing > rx_ring->xdp.xdp_tx_in_flight)
enetc_refill_rx_ring(rx_ring, enetc_bd_unused(rx_ring) -
rx_ring->xdp.xdp_tx_in_flight);
--
2.34.1
Powered by blists - more mailing lists