Message-Id: <20230328205623.142075-7-saeed@kernel.org>
Date: Tue, 28 Mar 2023 13:56:14 -0700
From: Saeed Mahameed <saeed@...nel.org>
To: "David S. Miller" <davem@...emloft.net>, Jakub Kicinski <kuba@...nel.org>,
	Paolo Abeni <pabeni@...hat.com>, Eric Dumazet <edumazet@...gle.com>
Cc: Saeed Mahameed <saeedm@...dia.com>, netdev@...r.kernel.org,
	Tariq Toukan <tariqt@...dia.com>, Jesper Dangaard Brouer <brouer@...hat.com>,
	Matthew Wilcox <willy@...radead.org>, Toke Høiland-Jørgensen <toke@...hat.com>,
	Ilias Apalodimas <ilias.apalodimas@...aro.org>, Dragos Tatulea <dtatulea@...dia.com>
Subject: [net-next 06/15] net/mlx5e: RX, Enable dma map and sync from page_pool allocator

From: Dragos Tatulea <dtatulea@...dia.com>

Remove driver dma mapping and unmapping of pages. Let the page_pool
api do it.

Signed-off-by: Dragos Tatulea <dtatulea@...dia.com>
Reviewed-by: Tariq Toukan <tariqt@...dia.com>
Signed-off-by: Saeed Mahameed <saeedm@...dia.com>
---
 .../net/ethernet/mellanox/mlx5/core/en/txrx.h |  1 -
 .../net/ethernet/mellanox/mlx5/core/en/xdp.c  |  2 --
 .../net/ethernet/mellanox/mlx5/core/en_main.c |  6 +++--
 .../net/ethernet/mellanox/mlx5/core/en_rx.c   | 22 -------------------
 4 files changed, 4 insertions(+), 27 deletions(-)

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
index dab00a2c2eb7..04419f56ac85 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
@@ -65,7 +65,6 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget);
 int mlx5e_poll_ico_cq(struct mlx5e_cq *cq);
 
 /* RX */
-void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct page *page);
 void mlx5e_page_release_dynamic(struct mlx5e_rq *rq, struct page *page, bool recycle);
 INDIRECT_CALLABLE_DECLARE(bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq));
 INDIRECT_CALLABLE_DECLARE(bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq));
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
index c5dae48b7932..5e6ef602c748 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -209,8 +209,6 @@ bool mlx5e_xdp_handle(struct mlx5e_rq *rq,
 			goto xdp_abort;
 		__set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags);
 		__set_bit(MLX5E_RQ_FLAG_XDP_REDIRECT, rq->flags);
-		if (xdp->rxq->mem.type != MEM_TYPE_XSK_BUFF_POOL)
-			mlx5e_page_dma_unmap(rq, virt_to_page(xdp->data));
 		rq->stats->xdp_redirect++;
 		return true;
 	default:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index b0322a20b71b..2a73680021c2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -733,7 +733,6 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
 			  struct mlx5e_rq_param *rqp,
 			  int node, struct mlx5e_rq *rq)
 {
-	struct page_pool_params pp_params = { 0 };
 	struct mlx5_core_dev *mdev = rq->mdev;
 	void *rqc = rqp->rqc;
 	void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
@@ -829,12 +828,15 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
 		xsk_pool_set_rxq_info(rq->xsk_pool, &rq->xdp_rxq);
 	} else {
 		/* Create a page_pool and register it with rxq */
+		struct page_pool_params pp_params = { 0 };
+
 		pp_params.order     = 0;
-		pp_params.flags     = 0; /* No-internal DMA mapping in page_pool */
+		pp_params.flags     = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
 		pp_params.pool_size = pool_size;
 		pp_params.nid       = node;
 		pp_params.dev       = rq->pdev;
 		pp_params.dma_dir   = rq->buff.map_dir;
+		pp_params.max_len   = PAGE_SIZE;
 
 		/* page_pool can be used even when there is no rq->xdp_prog,
 		 * given page_pool does not handle DMA mapping there is no
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 192f12a7d9a9..01c789b89cb9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -273,40 +273,18 @@ static inline u32 mlx5e_decompress_cqes_start(struct mlx5e_rq *rq,
 
 static inline int mlx5e_page_alloc_pool(struct mlx5e_rq *rq, struct page **pagep)
 {
-	dma_addr_t addr;
-
 	*pagep = page_pool_dev_alloc_pages(rq->page_pool);
 	if (unlikely(!*pagep))
 		return -ENOMEM;
 
-	/* Non-XSK always uses PAGE_SIZE. */
-	addr = dma_map_page(rq->pdev, *pagep, 0, PAGE_SIZE, rq->buff.map_dir);
-	if (unlikely(dma_mapping_error(rq->pdev, addr))) {
-		page_pool_recycle_direct(rq->page_pool, *pagep);
-		*pagep = NULL;
-		return -ENOMEM;
-	}
-	page_pool_set_dma_addr(*pagep, addr);
-
 	return 0;
 }
 
-void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct page *page)
-{
-	dma_addr_t dma_addr = page_pool_get_dma_addr(page);
-
-	dma_unmap_page_attrs(rq->pdev, dma_addr, PAGE_SIZE, rq->buff.map_dir,
-			     DMA_ATTR_SKIP_CPU_SYNC);
-	page_pool_set_dma_addr(page, 0);
-}
-
 void mlx5e_page_release_dynamic(struct mlx5e_rq *rq, struct page *page, bool recycle)
 {
 	if (likely(recycle)) {
-		mlx5e_page_dma_unmap(rq, page);
 		page_pool_recycle_direct(rq->page_pool, page);
 	} else {
-		mlx5e_page_dma_unmap(rq, page);
 		page_pool_release_page(rq->page_pool, page);
 		put_page(page);
 	}
-- 
2.39.2
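
For readers not familiar with the page_pool API, below is a minimal, driver-agnostic
sketch of the pattern this patch moves to: the pool is created with
PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV, so pages returned by
page_pool_dev_alloc_pages() are already DMA-mapped and synced for the device,
and the driver only reads the stored address with page_pool_get_dma_addr().
The struct and function names (demo_rq, demo_rx_*) are illustrative, not mlx5
code; the page_pool calls are the upstream <net/page_pool.h> API as of this
series.

#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <net/page_pool.h>

struct demo_rq {
	struct device *dev;
	struct page_pool *page_pool;
};

static int demo_rx_create_pool(struct demo_rq *rq, int node, u32 pool_size)
{
	struct page_pool_params pp_params = { 0 };

	pp_params.order     = 0;
	/* page_pool owns the DMA mapping and the device-side sync. */
	pp_params.flags     = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
	pp_params.pool_size = pool_size;
	pp_params.nid       = node;
	pp_params.dev       = rq->dev;
	pp_params.dma_dir   = DMA_FROM_DEVICE;
	/* Required with PP_FLAG_DMA_SYNC_DEV: how much of each page to sync. */
	pp_params.max_len   = PAGE_SIZE;

	rq->page_pool = page_pool_create(&pp_params);
	return PTR_ERR_OR_ZERO(rq->page_pool);
}

static int demo_rx_alloc(struct demo_rq *rq, struct page **pagep, dma_addr_t *addr)
{
	*pagep = page_pool_dev_alloc_pages(rq->page_pool);
	if (unlikely(!*pagep))
		return -ENOMEM;

	/* No dma_map_page() in the driver: the page is already mapped and
	 * the address is stored in the page by page_pool.
	 */
	*addr = page_pool_get_dma_addr(*pagep);
	return 0;
}

static void demo_rx_free(struct demo_rq *rq, struct page *page)
{
	/* No unmap here either: recycling keeps the mapping alive. */
	page_pool_recycle_direct(rq->page_pool, page);
}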