Message-ID: <CAJ+HfNhY0fMa2QiJJM0xnrzcPWw4ZYKoFjMrD03wfL0aKSnoyg@mail.gmail.com>
Date: Thu, 13 Jun 2019 14:50:06 +0200
From: Björn Töpel <bjorn.topel@...il.com>
To: Maxim Mikityanskiy <maximmi@...lanox.com>
Cc: Alexei Starovoitov <ast@...nel.org>,
Daniel Borkmann <daniel@...earbox.net>,
Björn Töpel <bjorn.topel@...el.com>,
Magnus Karlsson <magnus.karlsson@...el.com>,
"bpf@...r.kernel.org" <bpf@...r.kernel.org>,
"netdev@...r.kernel.org" <netdev@...r.kernel.org>,
"David S. Miller" <davem@...emloft.net>,
Saeed Mahameed <saeedm@...lanox.com>,
Jonathan Lemon <bsd@...com>,
Tariq Toukan <tariqt@...lanox.com>,
Martin KaFai Lau <kafai@...com>,
Song Liu <songliubraving@...com>, Yonghong Song <yhs@...com>,
Jakub Kicinski <jakub.kicinski@...ronome.com>,
Maciej Fijalkowski <maciejromanfijalkowski@...il.com>
Subject: Re: [PATCH bpf-next v4 02/17] xsk: Add API to check for available
entries in FQ
On Wed, 12 Jun 2019 at 20:05, Maxim Mikityanskiy <maximmi@...lanox.com> wrote:
>
> Add a function that checks whether the Fill Ring has the specified
> number of descriptors available. It will be useful for mlx5e, which
> wants to check in advance whether it can allocate a bulk of RX
> descriptors, to get the best performance.
>
Acked-by: Björn Töpel <bjorn.topel@...el.com>
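
For anyone following along, here is a minimal sketch of how a driver
is expected to use the new helper; the function name and bulk size
below are made up for illustration and are not part of the patch:

    #include <net/xdp_sock.h>

    #define EXAMPLE_RX_BULK 8	/* hypothetical bulk size */

    /* Check up front that the Fill Queue holds enough addresses, so a
     * bulk RX allocation is never started that cannot be completed.
     */
    static int example_alloc_rx_bulk(struct xdp_umem *umem)
    {
    	u64 addr;
    	int i;

    	/* New helper from this patch: true only if at least
    	 * EXAMPLE_RX_BULK addresses can be dequeued from the FQ.
    	 */
    	if (!xsk_umem_has_addrs(umem, EXAMPLE_RX_BULK))
    		return -ENOMEM;

    	for (i = 0; i < EXAMPLE_RX_BULK; i++) {
    		if (!xsk_umem_peek_addr(umem, &addr))
    			break;	/* cannot happen after the check above */
    		/* ... post addr to the hardware RX ring ... */
    		xsk_umem_discard_addr(umem);
    	}

    	return 0;
    }
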
> Signed-off-by: Maxim Mikityanskiy <maximmi@...lanox.com>
> Reviewed-by: Tariq Toukan <tariqt@...lanox.com>
> Acked-by: Saeed Mahameed <saeedm@...lanox.com>
> ---
> include/net/xdp_sock.h | 21 +++++++++++++++++++++
> net/xdp/xsk.c | 6 ++++++
> net/xdp/xsk_queue.h | 14 ++++++++++++++
> 3 files changed, 41 insertions(+)
>
> diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h
> index ae0f368a62bb..b6f5ebae43a1 100644
> --- a/include/net/xdp_sock.h
> +++ b/include/net/xdp_sock.h
> @@ -77,6 +77,7 @@ int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
> void xsk_flush(struct xdp_sock *xs);
> bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs);
> /* Used from netdev driver */
> +bool xsk_umem_has_addrs(struct xdp_umem *umem, u32 cnt);
> u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr);
> void xsk_umem_discard_addr(struct xdp_umem *umem);
> void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries);
> @@ -99,6 +100,16 @@ static inline dma_addr_t xdp_umem_get_dma(struct xdp_umem *umem, u64 addr)
> }
>
> /* Reuse-queue aware version of FILL queue helpers */
> +static inline bool xsk_umem_has_addrs_rq(struct xdp_umem *umem, u32 cnt)
> +{
> + struct xdp_umem_fq_reuse *rq = umem->fq_reuse;
> +
> + if (rq->length >= cnt)
> + return true;
> +
> + return xsk_umem_has_addrs(umem, cnt - rq->length);
> +}
> +
> static inline u64 *xsk_umem_peek_addr_rq(struct xdp_umem *umem, u64 *addr)
> {
> struct xdp_umem_fq_reuse *rq = umem->fq_reuse;
> @@ -146,6 +157,11 @@ static inline bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs)
> return false;
> }
>
> +static inline bool xsk_umem_has_addrs(struct xdp_umem *umem, u32 cnt)
> +{
> + return false;
> +}
> +
> static inline u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr)
> {
> return NULL;
> @@ -200,6 +216,11 @@ static inline dma_addr_t xdp_umem_get_dma(struct xdp_umem *umem, u64 addr)
> return 0;
> }
>
> +static inline bool xsk_umem_has_addrs_rq(struct xdp_umem *umem, u32 cnt)
> +{
> + return false;
> +}
> +
> static inline u64 *xsk_umem_peek_addr_rq(struct xdp_umem *umem, u64 *addr)
> {
> return NULL;
> diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
> index a14e8864e4fa..b68a380f50b3 100644
> --- a/net/xdp/xsk.c
> +++ b/net/xdp/xsk.c
> @@ -37,6 +37,12 @@ bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs)
> READ_ONCE(xs->umem->fq);
> }
>
> +bool xsk_umem_has_addrs(struct xdp_umem *umem, u32 cnt)
> +{
> + return xskq_has_addrs(umem->fq, cnt);
> +}
> +EXPORT_SYMBOL(xsk_umem_has_addrs);
> +
> u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr)
> {
> return xskq_peek_addr(umem->fq, addr);
> diff --git a/net/xdp/xsk_queue.h b/net/xdp/xsk_queue.h
> index 88b9ae24658d..12b49784a6d5 100644
> --- a/net/xdp/xsk_queue.h
> +++ b/net/xdp/xsk_queue.h
> @@ -117,6 +117,20 @@ static inline u32 xskq_nb_free(struct xsk_queue *q, u32 producer, u32 dcnt)
> return q->nentries - (producer - q->cons_tail);
> }
>
> +static inline bool xskq_has_addrs(struct xsk_queue *q, u32 cnt)
> +{
> + u32 entries = q->prod_tail - q->cons_tail;
> +
> + if (entries >= cnt)
> + return true;
> +
> + /* Refresh the local pointer. */
> + q->prod_tail = READ_ONCE(q->ring->producer);
> + entries = q->prod_tail - q->cons_tail;
> +
> + return entries >= cnt;
> +}
> +
> /* UMEM queue */
>
> static inline bool xskq_is_valid_addr(struct xsk_queue *q, u64 addr)
> --
> 2.19.1
>