[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <8cdbcbbb-5064-ae7f-af4a-abb0e4203b6d@arm.com>
Date: Thu, 8 Jul 2021 18:22:17 +0100
From: Robin Murphy <robin.murphy@....com>
To: David Stevens <stevensd@...omium.org>,
Joerg Roedel <joro@...tes.org>, Will Deacon <will@...nel.org>
Cc: Sergey Senozhatsky <senozhatsky@...omium.org>,
iommu@...ts.linux-foundation.org, Christoph Hellwig <hch@....de>,
linux-kernel@...r.kernel.org
Subject: Re: [PATCH 1/4] dma-iommu: add kalloc gfp flag to alloc helper
On 2021-07-07 08:55, David Stevens wrote:
> From: David Stevens <stevensd@...omium.org>
>
> Add gfp flag for kalloc calls within __iommu_dma_alloc_pages, so the
> function can be called from atomic contexts.
Why bother? If you need GFP_ATOMIC for allocating the pages array, then
you surely also need it for allocating the pages themselves. It's hardly
rocket science to infer one from the other.
Robin.
> Signed-off-by: David Stevens <stevensd@...omium.org>
> ---
> drivers/iommu/dma-iommu.c | 13 +++++++------
> 1 file changed, 7 insertions(+), 6 deletions(-)
>
> diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
> index 614f0dd86b08..00993b56c977 100644
> --- a/drivers/iommu/dma-iommu.c
> +++ b/drivers/iommu/dma-iommu.c
> @@ -593,7 +593,8 @@ static void __iommu_dma_free_pages(struct page **pages, int count)
> }
>
> static struct page **__iommu_dma_alloc_pages(struct device *dev,
> - unsigned int count, unsigned long order_mask, gfp_t gfp)
> + unsigned int count, unsigned long order_mask,
> + gfp_t page_gfp, gfp_t kalloc_gfp)
> {
> struct page **pages;
> unsigned int i = 0, nid = dev_to_node(dev);
> @@ -602,15 +603,15 @@ static struct page **__iommu_dma_alloc_pages(struct device *dev,
> if (!order_mask)
> return NULL;
>
> - pages = kvzalloc(count * sizeof(*pages), GFP_KERNEL);
> + pages = kvzalloc(count * sizeof(*pages), kalloc_gfp);
> if (!pages)
> return NULL;
>
> /* IOMMU can map any pages, so himem can also be used here */
> - gfp |= __GFP_NOWARN | __GFP_HIGHMEM;
> + page_gfp |= __GFP_NOWARN | __GFP_HIGHMEM;
>
> /* It makes no sense to muck about with huge pages */
> - gfp &= ~__GFP_COMP;
> + page_gfp &= ~__GFP_COMP;
>
> while (count) {
> struct page *page = NULL;
> @@ -624,7 +625,7 @@ static struct page **__iommu_dma_alloc_pages(struct device *dev,
> for (order_mask &= (2U << __fls(count)) - 1;
> order_mask; order_mask &= ~order_size) {
> unsigned int order = __fls(order_mask);
> - gfp_t alloc_flags = gfp;
> + gfp_t alloc_flags = page_gfp;
>
> order_size = 1U << order;
> if (order_mask > order_size)
> @@ -680,7 +681,7 @@ static struct page **__iommu_dma_alloc_noncontiguous(struct device *dev,
>
> count = PAGE_ALIGN(size) >> PAGE_SHIFT;
> pages = __iommu_dma_alloc_pages(dev, count, alloc_sizes >> PAGE_SHIFT,
> - gfp);
> + gfp, GFP_KERNEL);
> if (!pages)
> return NULL;
>
>
Powered by blists - more mailing lists