Message-ID: <a91d00eb-babc-de12-2413-c0d6a3b4dce6@gmail.com>
Date:   Thu, 18 May 2023 17:28:36 -0500
From:   Bob Pearson <rpearsonhpe@...il.com>
To:     Daisuke Matsuda <matsuda-daisuke@...itsu.com>,
        linux-rdma@...r.kernel.org, leonro@...dia.com, jgg@...dia.com,
        zyjzyj2000@...il.com
Cc:     linux-kernel@...r.kernel.org, yangx.jy@...itsu.com,
        lizhijian@...itsu.com, y-goto@...itsu.com
Subject: Re: [PATCH for-next v5 2/7] RDMA/rxe: Make MR functions accessible
 from other rxe source code

On 5/18/23 03:21, Daisuke Matsuda wrote:
> Some functions in rxe_mr.c are going to be used in rxe_odp.c, which is to
> be created in the subsequent patch. List the declarations of the functions
> in rxe_loc.h.
> 
> Signed-off-by: Daisuke Matsuda <matsuda-daisuke@...itsu.com>
> ---
>  drivers/infiniband/sw/rxe/rxe_loc.h | 14 ++++++++++++++
>  drivers/infiniband/sw/rxe/rxe_mr.c  | 18 ++++--------------
>  2 files changed, 18 insertions(+), 14 deletions(-)
> 
> diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
> index 804b15e929dd..00fedd1a4980 100644
> --- a/drivers/infiniband/sw/rxe/rxe_loc.h
> +++ b/drivers/infiniband/sw/rxe/rxe_loc.h
> @@ -60,7 +60,9 @@ int rxe_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
>  
>  /* rxe_mr.c */
>  u8 rxe_get_next_key(u32 last_key);
> +void rxe_mr_init(int access, struct rxe_mr *mr);
>  void rxe_mr_init_dma(int access, struct rxe_mr *mr);
> +int rxe_mr_fill_pages_from_sgt(struct rxe_mr *mr, struct sg_table *sgt);
>  int rxe_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length, u64 iova,
>  		     int access, struct rxe_mr *mr);
>  int rxe_mr_init_fast(int max_pages, struct rxe_mr *mr);
> @@ -71,6 +73,8 @@ int copy_data(struct rxe_pd *pd, int access, struct rxe_dma_info *dma,
>  	      void *addr, int length, enum rxe_mr_copy_dir dir);
>  int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
>  		  int sg_nents, unsigned int *sg_offset);
> +int rxe_mr_copy_xarray(struct rxe_mr *mr, u64 iova, void *addr,
> +		       unsigned int length, enum rxe_mr_copy_dir dir);
>  int rxe_mr_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
>  			u64 compare, u64 swap_add, u64 *orig_val);
>  int rxe_mr_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value);
> @@ -82,6 +86,16 @@ int rxe_invalidate_mr(struct rxe_qp *qp, u32 key);
>  int rxe_reg_fast_mr(struct rxe_qp *qp, struct rxe_send_wqe *wqe);
>  void rxe_mr_cleanup(struct rxe_pool_elem *elem);
>  
> +static inline unsigned long rxe_mr_iova_to_index(struct rxe_mr *mr, u64 iova)
> +{
> +	return (iova >> mr->page_shift) - (mr->ibmr.iova >> mr->page_shift);
> +}
> +
> +static inline unsigned long rxe_mr_iova_to_page_offset(struct rxe_mr *mr, u64 iova)
> +{
> +	return iova & (mr_page_size(mr) - 1);
> +}
> +
>  /* rxe_mw.c */
>  int rxe_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata);
>  int rxe_dealloc_mw(struct ib_mw *ibmw);
> diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
> index 0e538fafcc20..ffbac6f5e828 100644
> --- a/drivers/infiniband/sw/rxe/rxe_mr.c
> +++ b/drivers/infiniband/sw/rxe/rxe_mr.c
> @@ -49,7 +49,7 @@ int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length)
>  				| IB_ACCESS_REMOTE_WRITE	\
>  				| IB_ACCESS_REMOTE_ATOMIC)
>  
> -static void rxe_mr_init(int access, struct rxe_mr *mr)
> +void rxe_mr_init(int access, struct rxe_mr *mr)
>  {
>  	u32 lkey = mr->elem.index << 8 | rxe_get_next_key(-1);
>  	u32 rkey = (access & IB_ACCESS_REMOTE) ? lkey : 0;
> @@ -77,16 +77,6 @@ void rxe_mr_init_dma(int access, struct rxe_mr *mr)
>  	mr->ibmr.type = IB_MR_TYPE_DMA;
>  }
>  
> -static unsigned long rxe_mr_iova_to_index(struct rxe_mr *mr, u64 iova)
> -{
> -	return (iova >> mr->page_shift) - (mr->ibmr.iova >> mr->page_shift);
> -}
> -
> -static unsigned long rxe_mr_iova_to_page_offset(struct rxe_mr *mr, u64 iova)
> -{
> -	return iova & (mr_page_size(mr) - 1);
> -}
> -
>  static bool is_pmem_page(struct page *pg)
>  {
>  	unsigned long paddr = page_to_phys(pg);
> @@ -96,7 +86,7 @@ static bool is_pmem_page(struct page *pg)
>  				 IORES_DESC_PERSISTENT_MEMORY);
>  }
>  
> -static int rxe_mr_fill_pages_from_sgt(struct rxe_mr *mr, struct sg_table *sgt)
> +int rxe_mr_fill_pages_from_sgt(struct rxe_mr *mr, struct sg_table *sgt)
>  {
>  	XA_STATE(xas, &mr->page_list, 0);
>  	struct sg_page_iter sg_iter;
> @@ -247,8 +237,8 @@ int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sgl,
>  	return ib_sg_to_pages(ibmr, sgl, sg_nents, sg_offset, rxe_set_page);
>  }
>  
> -static int rxe_mr_copy_xarray(struct rxe_mr *mr, u64 iova, void *addr,
> -			      unsigned int length, enum rxe_mr_copy_dir dir)
> +int rxe_mr_copy_xarray(struct rxe_mr *mr, u64 iova, void *addr,
> +		       unsigned int length, enum rxe_mr_copy_dir dir)
>  {
>  	unsigned int page_offset = rxe_mr_iova_to_page_offset(mr, iova);
>  	unsigned long index = rxe_mr_iova_to_index(mr, iova);

Looks good.
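
As a side note for anyone following along: the two inline helpers moved
into rxe_loc.h only do page-index/offset arithmetic on the MR's iova. A
minimal userspace sketch of that arithmetic is below, assuming a 4 KiB
page size; "struct fake_mr" and the other names are made up for
illustration and are not kernel code.

/* Userspace sketch of what rxe_mr_iova_to_index() and
 * rxe_mr_iova_to_page_offset() compute, assuming a 4 KiB page size.
 * struct fake_mr only mimics the two fields the helpers touch.
 */
#include <stdint.h>
#include <stdio.h>

struct fake_mr {
	uint64_t iova;			/* base iova of the region (mr->ibmr.iova) */
	unsigned int page_shift;	/* log2 of the MR page size */
};

static unsigned long iova_to_index(struct fake_mr *mr, uint64_t iova)
{
	/* index into the MR's page_list, relative to the base iova */
	return (iova >> mr->page_shift) - (mr->iova >> mr->page_shift);
}

static unsigned long iova_to_page_offset(struct fake_mr *mr, uint64_t iova)
{
	/* byte offset within that page */
	return iova & ((1ul << mr->page_shift) - 1);
}

int main(void)
{
	struct fake_mr mr = { .iova = 0x7f0000001000, .page_shift = 12 };
	uint64_t iova = 0x7f0000003a10;

	printf("index=%lu offset=0x%lx\n",
	       iova_to_index(&mr, iova), iova_to_page_offset(&mr, iova));
	/* prints: index=2 offset=0xa10 */
	return 0;
}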

Reviewed-by: Bob Pearson <rpearsonhpe@...il.com>
