Message-ID: <CANn89iKytfwyax_d+7U8Xw-Wvj5z1d7xoi4LNhmUQphDiborDQ@mail.gmail.com>
Date: Thu, 10 Oct 2024 06:20:57 +0200
From: Eric Dumazet <edumazet@...gle.com>
To: Joe Damato <jdamato@...tly.com>
Cc: netdev@...r.kernel.org, mkarsten@...terloo.ca, skhawaja@...gle.com,
sdf@...ichev.me, bjorn@...osinc.com, amritha.nambiar@...el.com,
sridhar.samudrala@...el.com, willemdebruijn.kernel@...il.com,
"David S. Miller" <davem@...emloft.net>, Jakub Kicinski <kuba@...nel.org>, Paolo Abeni <pabeni@...hat.com>,
Jonathan Corbet <corbet@....net>, Jiri Pirko <jiri@...nulli.us>,
Sebastian Andrzej Siewior <bigeasy@...utronix.de>, Lorenzo Bianconi <lorenzo@...nel.org>,
Kory Maincent <kory.maincent@...tlin.com>, Johannes Berg <johannes.berg@...el.com>,
"open list:DOCUMENTATION" <linux-doc@...r.kernel.org>, open list <linux-kernel@...r.kernel.org>
Subject: Re: [net-next v5 5/9] net: napi: Add napi_config
On Wed, Oct 9, 2024 at 2:56 AM Joe Damato <jdamato@...tly.com> wrote:
>
> Add a persistent NAPI config area for NAPI configuration to the core.
> Drivers opt-in to setting the persistent config for a NAPI by passing an
> index when calling netif_napi_add_config.
>
> napi_config is allocated in alloc_netdev_mqs, freed in free_netdev
> (after the NAPIs are deleted).
>
> Drivers which call netif_napi_add_config will have persistent per-NAPI
> settings: NAPI IDs, gro_flush_timeout, and defer_hard_irq settings.
>
> Per-NAPI settings are saved in napi_disable and restored in napi_enable.
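[For readers following along: the driver-side opt-in would look roughly
like the sketch below. The example_drv_* names and the rx_rings layout
are made up for illustration; the point is only that the driver passes
a stable index, typically the queue index, so settings survive NAPI
delete/re-add across queue count changes.]

/* Hypothetical driver init path: opt in to persistent per-NAPI config
 * by passing a stable index (here, the RX queue index).
 */
static void example_drv_add_napis(struct net_device *dev,
                                  struct example_drv_priv *priv)
{
        int i;

        for (i = 0; i < dev->real_num_rx_queues; i++)
                netif_napi_add_config(dev, &priv->rx_rings[i].napi,
                                      example_drv_poll, i);
}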
>
> Co-developed-by: Martin Karsten <mkarsten@...terloo.ca>
> Signed-off-by: Martin Karsten <mkarsten@...terloo.ca>
> Signed-off-by: Joe Damato <jdamato@...tly.com>
> ---
> .../networking/net_cachelines/net_device.rst | 1 +
> include/linux/netdevice.h | 36 ++++++++-
> net/core/dev.c | 79 +++++++++++++++++--
> net/core/dev.h | 12 +++
> 4 files changed, 118 insertions(+), 10 deletions(-)
>
> diff --git a/Documentation/networking/net_cachelines/net_device.rst b/Documentation/networking/net_cachelines/net_device.rst
> index 3ab663b6cf16..9d86720cb722 100644
> --- a/Documentation/networking/net_cachelines/net_device.rst
> +++ b/Documentation/networking/net_cachelines/net_device.rst
> @@ -183,5 +183,6 @@ struct_dpll_pin* dpll_pin
> struct hlist_head page_pools
> struct dim_irq_moder* irq_moder
> u64 max_pacing_offload_horizon
> +struct_napi_config* napi_config
> unsigned_long gro_flush_timeout
> u32 napi_defer_hard_irqs
> diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
> index 4239a4a9d295..b65a901ab4e7 100644
> --- a/include/linux/netdevice.h
> +++ b/include/linux/netdevice.h
> @@ -342,6 +342,15 @@ struct gro_list {
> */
> #define GRO_HASH_BUCKETS 8
>
> +/*
> + * Structure for per-NAPI config
> + */
> +struct napi_config {
> + u64 gro_flush_timeout;
> + u32 defer_hard_irqs;
> + unsigned int napi_id;
> +};
> +
> /*
> * Structure for NAPI scheduling similar to tasklet but with weighting
> */
> @@ -379,6 +388,8 @@ struct napi_struct {
> struct list_head dev_list;
> struct hlist_node napi_hash_node;
> int irq;
> + int index;
> + struct napi_config *config;
> };
>
> enum {
> @@ -1860,9 +1871,6 @@ enum netdev_reg_state {
> * allocated at register_netdev() time
> * @real_num_rx_queues: Number of RX queues currently active in device
> * @xdp_prog: XDP sockets filter program pointer
> - * @gro_flush_timeout: timeout for GRO layer in NAPI
> - * @napi_defer_hard_irqs: If not zero, provides a counter that would
> - * allow to avoid NIC hard IRQ, on busy queues.
> *
> * @rx_handler: handler for received packets
> * @rx_handler_data: XXX: need comments on this one
> @@ -2012,6 +2020,11 @@ enum netdev_reg_state {
> * where the clock is recovered.
> *
> * @max_pacing_offload_horizon: max EDT offload horizon in nsec.
> + * @napi_config: An array of napi_config structures containing per-NAPI
> + * settings.
> + * @gro_flush_timeout: timeout for GRO layer in NAPI
> + * @napi_defer_hard_irqs: If not zero, provides a counter that would
> + * allow to avoid NIC hard IRQ, on busy queues.
> *
> * FIXME: cleanup struct net_device such that network protocol info
> * moves out.
> @@ -2405,6 +2418,7 @@ struct net_device {
> struct dim_irq_moder *irq_moder;
>
> u64 max_pacing_offload_horizon;
> + struct napi_config *napi_config;
> unsigned long gro_flush_timeout;
> u32 napi_defer_hard_irqs;
>
> @@ -2657,6 +2671,22 @@ netif_napi_add_tx_weight(struct net_device *dev,
> netif_napi_add_weight(dev, napi, poll, weight);
> }
>
> +/**
> + * netif_napi_add_config - initialize a NAPI context with persistent config
> + * @dev: network device
> + * @napi: NAPI context
> + * @poll: polling function
> + * @index: the NAPI index
> + */
> +static inline void
> +netif_napi_add_config(struct net_device *dev, struct napi_struct *napi,
> + int (*poll)(struct napi_struct *, int), int index)
> +{
> + napi->index = index;
> + napi->config = &dev->napi_config[index];
> + netif_napi_add_weight(dev, napi, poll, NAPI_POLL_WEIGHT);
> +}
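[To make the save/restore mentioned in the changelog concrete: with
napi->config wired up as above, the disable path can stash the live
values and the enable path can reload them, along the lines of the
sketch below. The helper name and exact fields are my reading of the
series, not necessarily verbatim what lands in net/core/dev.h.]

/* Sketch: persist the live per-NAPI settings into the config area on
 * napi_disable(), so a later napi_enable() can restore them.
 */
static void napi_save_config(struct napi_struct *n)
{
        n->config->defer_hard_irqs = n->defer_hard_irqs;
        n->config->gro_flush_timeout = n->gro_flush_timeout;
        n->config->napi_id = n->napi_id;
}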
> +
> /**
> * netif_napi_add_tx() - initialize a NAPI context to be used for Tx only
> * @dev: network device
> diff --git a/net/core/dev.c b/net/core/dev.c
> index fca2295f4d95..bd87232f7b37 100644
> --- a/net/core/dev.c
> +++ b/net/core/dev.c
> @@ -6503,6 +6503,22 @@ EXPORT_SYMBOL(napi_busy_loop);
>
> #endif /* CONFIG_NET_RX_BUSY_POLL */
>
> +static void __napi_hash_add_with_id(struct napi_struct *napi,
> + unsigned int napi_id)
> +{
> + napi->napi_id = napi_id;
> + hlist_add_head_rcu(&napi->napi_hash_node,
> + &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);
> +}
> +
> +static void napi_hash_add_with_id(struct napi_struct *napi,
> + unsigned int napi_id)
> +{
> + spin_lock(&napi_hash_lock);
> + __napi_hash_add_with_id(napi, napi_id);
Hmmm... there is no check that 'napi_id' is not already in use and
hashed. I would add:

WARN_ON_ONCE(napi_by_id(napi_id));
> + spin_unlock(&napi_hash_lock);
> +}
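With that check folded in, the helper would read (napi_hash_lock is
held at that point, so calling napi_by_id() there is safe):

static void napi_hash_add_with_id(struct napi_struct *napi,
                                  unsigned int napi_id)
{
        spin_lock(&napi_hash_lock);
        WARN_ON_ONCE(napi_by_id(napi_id));
        __napi_hash_add_with_id(napi, napi_id);
        spin_unlock(&napi_hash_lock);
}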
> +
> static void napi_hash_add(struct napi_struct *napi)
> {
> if (test_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state))
> @@ -6515,10 +6531,8 @@ static void napi_hash_add(struct napi_struct *napi)
> if (unlikely(++napi_gen_id < MIN_NAPI_ID))
> napi_gen_id = MIN_NAPI_ID;
> } while (napi_by_id(napi_gen_id));
> - napi->napi_id = napi_gen_id;
>
> - hlist_add_head_rcu(&napi->napi_hash_node,
> - &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);
> + __napi_hash_add_with_id(napi, napi_gen_id);
>
> spin_unlock(&napi_hash_lock);
> }
>