Message-ID: <Y+5IEDPwVfT9O8Fx@boxer>
Date: Thu, 16 Feb 2023 16:13:20 +0100
From: Maciej Fijalkowski <maciej.fijalkowski@...el.com>
To: Saeed Mahameed <saeed@...nel.org>
CC: "David S. Miller" <davem@...emloft.net>, Jakub Kicinski <kuba@...nel.org>,
	Paolo Abeni <pabeni@...hat.com>, Eric Dumazet <edumazet@...gle.com>,
	"Saeed Mahameed" <saeedm@...dia.com>, <netdev@...r.kernel.org>,
	Tariq Toukan <tariqt@...dia.com>, Dragos Tatulea <dtatulea@...dia.com>
Subject: Re: [net-next 3/9] net/mlx5e: Remove redundant page argument in mlx5e_xdp_handle()

On Wed, Feb 15, 2023 at 04:09:12PM -0800, Saeed Mahameed wrote:
> From: Tariq Toukan <tariqt@...dia.com>
>
> Remove the page parameter, it can be derived from the xdp_buff member
> of mlx5e_xdp_buff.

Okay, that's a nice cleanup. How about squashing this with the previous
patch that does the same thing for the xmit xdp_buff routine?

>
> Signed-off-by: Tariq Toukan <tariqt@...dia.com>
> Reviewed-by: Dragos Tatulea <dtatulea@...dia.com>
> Signed-off-by: Saeed Mahameed <saeedm@...dia.com>
> ---
>  drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c    |  4 ++--
>  drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h    |  2 +-
>  drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c |  4 ++--
>  drivers/net/ethernet/mellanox/mlx5/core/en_rx.c     | 10 ++++------
>  4 files changed, 9 insertions(+), 11 deletions(-)
>
> diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
> index 4b9cd8ef8d28..bcd6370de440 100644
> --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
> +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
> @@ -186,7 +186,7 @@ const struct xdp_metadata_ops mlx5e_xdp_metadata_ops = {
>  };
>
>  /* returns true if packet was consumed by xdp */
> -bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct page *page,
> +bool mlx5e_xdp_handle(struct mlx5e_rq *rq,
>  		      struct bpf_prog *prog, struct mlx5e_xdp_buff *mxbuf)
>  {
>  	struct xdp_buff *xdp = &mxbuf->xdp;
> @@ -210,7 +210,7 @@ bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct page *page,
>  		__set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags);
>  		__set_bit(MLX5E_RQ_FLAG_XDP_REDIRECT, rq->flags);
>  		if (xdp->rxq->mem.type != MEM_TYPE_XSK_BUFF_POOL)
> -			mlx5e_page_dma_unmap(rq, page);
> +			mlx5e_page_dma_unmap(rq, virt_to_page(xdp->data));
>  		rq->stats->xdp_redirect++;
>  		return true;
>  	default:
> diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
> index 69f338bc0633..10bcfa6f88c1 100644
> --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
> +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
> @@ -52,7 +52,7 @@ struct mlx5e_xdp_buff {
>
>  struct mlx5e_xsk_param;
>  int mlx5e_xdp_max_mtu(struct mlx5e_params *params, struct mlx5e_xsk_param *xsk);
> -bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct page *page,
> +bool mlx5e_xdp_handle(struct mlx5e_rq *rq,
>  		      struct bpf_prog *prog, struct mlx5e_xdp_buff *mlctx);
>  void mlx5e_xdp_mpwqe_complete(struct mlx5e_xdpsq *sq);
>  bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq);
> diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
> index b7c84ebe8418..fab787600459 100644
> --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
> +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
> @@ -289,7 +289,7 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
>  	 */
>
>  	prog = rcu_dereference(rq->xdp_prog);
> -	if (likely(prog && mlx5e_xdp_handle(rq, NULL, prog, mxbuf))) {
> +	if (likely(prog && mlx5e_xdp_handle(rq, prog, mxbuf))) {
>  		if (likely(__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)))
>  			__set_bit(page_idx, wi->xdp_xmit_bitmap); /* non-atomic */
>  		return NULL; /* page/packet was consumed by XDP */
> @@ -323,7 +323,7 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
>  	net_prefetch(mxbuf->xdp.data);
>
>  	prog = rcu_dereference(rq->xdp_prog);
> -	if (likely(prog && mlx5e_xdp_handle(rq, NULL, prog, mxbuf)))
> +	if (likely(prog && mlx5e_xdp_handle(rq, prog, mxbuf)))
>  		return NULL; /* page/packet was consumed by XDP */
>
>  	/* XDP_PASS: copy the data from the UMEM to a new SKB. The frame reuse
> diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
> index 9ac2c7778b5b..ac570945d5d2 100644
> --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
> +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
> @@ -1610,7 +1610,7 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi,
>
>  	net_prefetchw(va); /* xdp_frame data area */
>  	mlx5e_fill_mxbuf(rq, cqe, va, rx_headroom, cqe_bcnt, &mxbuf);
> -	if (mlx5e_xdp_handle(rq, au->page, prog, &mxbuf))
> +	if (mlx5e_xdp_handle(rq, prog, &mxbuf))
>  		return NULL; /* page/packet was consumed by XDP */
>
>  	rx_headroom = mxbuf.xdp.data - mxbuf.xdp.data_hard_start;
> @@ -1698,10 +1698,8 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
>  		wi++;
>  	}
>
> -	au = head_wi->au;
> -
>  	prog = rcu_dereference(rq->xdp_prog);
> -	if (prog && mlx5e_xdp_handle(rq, au->page, prog, &mxbuf)) {
> +	if (prog && mlx5e_xdp_handle(rq, prog, &mxbuf)) {
>  		if (test_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
>  			int i;
>
> @@ -1718,7 +1716,7 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
>  	if (unlikely(!skb))
>  		return NULL;
>
> -	page_ref_inc(au->page);
> +	page_ref_inc(head_wi->au->page);
>
>  	if (unlikely(xdp_buff_has_frags(&mxbuf.xdp))) {
>  		int i;
>
> @@ -2013,7 +2011,7 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
>
>  	net_prefetchw(va); /* xdp_frame data area */
>  	mlx5e_fill_mxbuf(rq, cqe, va, rx_headroom, cqe_bcnt, &mxbuf);
> -	if (mlx5e_xdp_handle(rq, au->page, prog, &mxbuf)) {
> +	if (mlx5e_xdp_handle(rq, prog, &mxbuf)) {
>  		if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
>  			__set_bit(page_idx, wi->xdp_xmit_bitmap); /* non-atomic */
>  		return NULL; /* page/packet was consumed by XDP */
> --
> 2.39.1
>
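[Editor's note] For readers less familiar with the mlx5e data path, the idea
behind dropping the page argument is that, for page-allocator-backed RX
buffers, the backing page can be recovered from the buffer's data pointer,
which the patch open-codes as virt_to_page(xdp->data) before
mlx5e_page_dma_unmap(). A minimal sketch of that derivation is below; the
helper name xdp_buff_page() is hypothetical and not part of the driver or
the patch.

/* Illustrative sketch only -- not from the patch itself. Assumes the
 * xdp_buff's data area lives in a page obtained from the page allocator;
 * the driver skips this derivation for MEM_TYPE_XSK_BUFF_POOL buffers,
 * which are handled by the XSK pool instead.
 */
#include <linux/mm.h>	/* virt_to_page() */
#include <net/xdp.h>	/* struct xdp_buff */

static inline struct page *xdp_buff_page(const struct xdp_buff *xdp)
{
	/* Map the kernel virtual address of the packet data back to its page. */
	return virt_to_page(xdp->data);
}

This keeps the mlx5e_xdp_handle() signature minimal and spares callers,
including the XSK paths that previously passed NULL, from tracking the page
alongside the xdp_buff.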