[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <504C9EFCA2D0054393414C9CB605C37F20CBC237@SJEXCHMB06.corp.ad.broadcom.com>
Date: Tue, 18 Jun 2013 10:45:05 +0000
From: "Dmitry Kravkov" <dmitry@...adcom.com>
To: "Eric Dumazet" <eric.dumazet@...il.com>
cc: "davem@...emloft.net" <davem@...emloft.net>,
"netdev@...r.kernel.org" <netdev@...r.kernel.org>,
"Eilon Greenstein" <eilong@...adcom.com>
Subject: RE: [PATCH net-next 1/2] bnx2x: add support for ndo_ll_poll
> -----Original Message-----
> From: Eric Dumazet [mailto:eric.dumazet@...il.com]
> Sent: Tuesday, June 18, 2013 11:11 AM
> To: Dmitry Kravkov
> Cc: davem@...emloft.net; netdev@...r.kernel.org; Eilon Greenstein
> Subject: Re: [PATCH net-next 1/2] bnx2x: add support for ndo_ll_poll
>
> On Tue, 2013-06-18 at 10:42 +0300, Dmitry Kravkov wrote:
> > Adds ndo_ll_poll method and locking for FPs between LL and the napi.
> >
> > When receiving a packet we use skb_mark_ll to record the napi it came from.
> > Add each napi to the napi_hash right after netif_napi_add().
> >
> > Signed-off-by: Dmitry Kravkov <dmitry@...adcom.com>
> > Signed-off-by: Eilon Greenstein <eilong@...adcom.com>
> > ---
> > drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | 125 +++++++++++++++++++++++
> > drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 73 +++++++++++--
> > drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h | 23 ++++-
> > drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 4 +
> > 4 files changed, 213 insertions(+), 12 deletions(-)
> >
> > diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
> > index f76597e..a295a53 100644
> > --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
> > +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
> > @@ -485,6 +485,21 @@ struct bnx2x_fastpath {
> > struct bnx2x *bp; /* parent */
> >
> > struct napi_struct napi;
> > +
> > +#ifdef CONFIG_NET_LL_RX_POLL
> > + unsigned int state;
> > +#define BNX2X_FP_STATE_IDLE 0
> > +#define BNX2X_FP_STATE_NAPI (1 << 0) /* NAPI owns this FP */
> > +#define BNX2X_FP_STATE_POLL (1 << 1) /* poll owns this FP */
> > +#define BNX2X_FP_STATE_NAPI_YIELD (1 << 2) /* NAPI yielded this FP */
> > +#define BNX2X_FP_STATE_POLL_YIELD (1 << 3) /* poll yielded this FP */
> > +#define BNX2X_FP_YIELD (BNX2X_FP_STATE_NAPI_YIELD | BNX2X_FP_STATE_POLL_YIELD)
> > +#define BNX2X_FP_LOCKED (BNX2X_FP_STATE_NAPI | BNX2X_FP_STATE_POLL)
> > +#define BNX2X_FP_USER_PEND (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_POLL_YIELD)
> > + /* protect state */
> > + spinlock_t lock;
> > +#endif /* CONFIG_NET_LL_RX_POLL */
> > +
> > union host_hc_status_block status_blk;
> > /* chip independent shortcuts into sb structure */
> > __le16 *sb_index_values;
> > @@ -557,6 +572,116 @@ struct bnx2x_fastpath {
> > #define bnx2x_fp_stats(bp, fp) (&((bp)->fp_stats[(fp)->index]))
> > #define bnx2x_fp_qstats(bp, fp) (&((bp)->fp_stats[(fp)->index].eth_q_stats))
> >
> > +#ifdef CONFIG_NET_LL_RX_POLL
> > +static inline void bnx2x_fp_init_lock(struct bnx2x_fastpath *fp)
> > +{
> > + spin_lock_init(&fp->lock);
> > + fp->state = BNX2X_FP_STATE_IDLE;
> > +}
> > +
> > +/* called from the device poll routine to get ownership of a FP */
> > +static inline int bnx2x_fp_lock_napi(struct bnx2x_fastpath *fp)
>
> static inline bool ?
>
> > +{
> > + int rc = true;
> > +
> > + spin_lock(&fp->lock);
> > + if (fp->state & BNX2X_FP_LOCKED) {
> > + WARN_ON(fp->state & BNX2X_FP_STATE_NAPI);
> > + fp->state |= BNX2X_FP_STATE_NAPI_YIELD;
> > + rc = false;
> > + } else {
> > + /* we don't care if someone yielded */
> > + fp->state = BNX2X_FP_STATE_NAPI;
> > + }
> > + spin_unlock(&fp->lock);
> > + return rc;
> > +}
> > +
> > +/* returns true if someone tried to get the FP while napi had it */
> > +static inline int bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp)
>
> bool
>
> > +{
> > + int rc = false;
> > +
> > + spin_lock(&fp->lock);
> > + WARN_ON(fp->state &
> > + (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_NAPI_YIELD));
> > +
> > + if (fp->state & BNX2X_FP_STATE_POLL_YIELD)
> > + rc = true;
> > + fp->state = BNX2X_FP_STATE_IDLE;
> > + spin_unlock(&fp->lock);
> > + return rc;
> > +}
> > +
> > +/* called from bnx2x_low_latency_poll() */
> > +static inline int bnx2x_fp_lock_poll(struct bnx2x_fastpath *fp)
>
> bool
>
> > +{
> > + int rc = true;
> > +
> > + spin_lock_bh(&fp->lock);
> > + if ((fp->state & BNX2X_FP_LOCKED)) {
> > + fp->state |= BNX2X_FP_STATE_POLL_YIELD;
> > + rc = false;
> > + } else {
> > + /* preserve yield marks */
> > + fp->state |= BNX2X_FP_STATE_POLL;
> > + }
> > + spin_unlock_bh(&fp->lock);
> > + return rc;
> > +}
> > +
> > +/* returns true if someone tried to get the FP while it was locked */
> > +static inline int bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp)
> bool
>
> > +{
> > + int rc = false;
> > +
> > + spin_lock_bh(&fp->lock);
> > + WARN_ON(fp->state & BNX2X_FP_STATE_NAPI);
> > +
> > + if (fp->state & BNX2X_FP_STATE_POLL_YIELD)
> > + rc = true;
> > + fp->state = BNX2X_FP_STATE_IDLE;
> > + spin_unlock_bh(&fp->lock);
> > + return rc;
> > +}
> > +
> > +/* true if a socket is polling, even if it did not get the lock */
> > +static inline int bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp)
> bool
>
> > +{
> > + WARN_ON(!(fp->state & BNX2X_FP_LOCKED));
> > + return fp->state & BNX2X_FP_USER_PEND;
> > +}
> > +#else
> > +static inline void bnx2x_fp_init_lock(struct bnx2x_fastpath *fp)
> > +{
> > +}
> > +
> > +static inline bool bnx2x_fp_lock_napi(struct bnx2x_fastpath *fp)
> > +{
> > + return true;
> > +}
> > +
> > +static inline bool bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp)
> > +{
> > + return false;
> > +}
> > +
> > +static inline bool bnx2x_fp_lock_poll(struct bnx2x_fastpath *fp)
> > +{
> > + return false;
> > +}
> > +
> > +static inline bool bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp)
> > +{
> > + return false;
> > +}
> > +
> > +static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp)
> > +{
> > + return false;
> > +}
> > +#endif /* CONFIG_NET_LL_RX_POLL */
> > +
> > /* Use 2500 as a mini-jumbo MTU for FCoE */
> > #define BNX2X_FCOE_MINI_JUMBO_MTU 2500
> >
> > diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
> > index 4e42bdd..d8d371b 100644
> > --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
> > +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
> > @@ -24,6 +24,7 @@
> > #include <net/tcp.h>
> > #include <net/ipv6.h>
> > #include <net/ip6_checksum.h>
> > +#include <net/ll_poll.h>
> > #include <linux/prefetch.h>
> > #include "bnx2x_cmn.h"
> > #include "bnx2x_init.h"
> > @@ -669,7 +670,12 @@ static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
> > }
> > }
> > #endif
> > - napi_gro_receive(&fp->napi, skb);
> > + skb_mark_ll(skb, &fp->napi);
> > +
> > + if (bnx2x_fp_ll_polling(fp))
> > + netif_receive_skb(skb);
> > + else
> > + napi_gro_receive(&fp->napi, skb);
> > }
> >
>
> This is racy [1], so I would not care and always call napi_gro_receive()
>
> [1] We would have to flush GRO state every time we call
> bnx2x_fp_lock_poll()
>
> Ideally, we could keep a counter of enabled LLS sockets, to
> automatically switch off/on GRO, but in net/core, not in every driver.
Since we have GRO in FW, it will be hard to disable/enable it on the fly; probably better to return LL_FLUSH_FAILED when GRO is enabled?
>
>
Powered by blists - more mailing lists