Message-Id: <20220607164944.101702481@linuxfoundation.org>
Date: Tue, 7 Jun 2022 18:59:36 +0200
From: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
To: linux-kernel@...r.kernel.org
Cc: Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
stable@...r.kernel.org, Mark O'Neill <mao@...blingdice.co.uk>,
Christoph Hellwig <hch@....de>, Sasha Levin <sashal@...nel.org>
Subject: [PATCH 5.15 311/667] dma-direct: don't fail on highmem CMA pages in dma_direct_alloc_pages
From: Christoph Hellwig <hch@....de>
[ Upstream commit 92826e967535db2eb117db227b1191aaf98e4bb3 ]
When dma_direct_alloc_pages encounters a highmem page it currently just
gives up. But what it really should do is try to allocate the memory
from the page allocator instead - without this, platforms with a global
highmem CMA pool fail all dma_alloc_pages allocations.
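For illustration, a minimal sketch (not part of this patch; the driver,
dev pointer, and error handling are hypothetical) of the dma_alloc_pages
call path that this change keeps working on such platforms:

	/* Hypothetical driver snippet: allocate a page-aligned DMA
	 * buffer via the dma_alloc_pages API touched by this patch.
	 */
	struct page *page;
	dma_addr_t dma_handle;

	page = dma_alloc_pages(dev, PAGE_SIZE, &dma_handle,
			       DMA_TO_DEVICE, GFP_KERNEL);
	if (!page)
		return -ENOMEM;	/* previously unavoidable with a highmem CMA pool */
	/* ... use page / dma_handle ... */
	dma_free_pages(dev, PAGE_SIZE, page, dma_handle, DMA_TO_DEVICE);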
Fixes: efa70f2fdc84 ("dma-mapping: add a new dma_alloc_pages API")
Reported-by: Mark O'Neill <mao@...blingdice.co.uk>
Signed-off-by: Christoph Hellwig <hch@....de>
Signed-off-by: Sasha Levin <sashal@...nel.org>
---
kernel/dma/direct.c | 27 ++++++++++-----------------
1 file changed, 10 insertions(+), 17 deletions(-)
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index ddaac01f2cba..a1573ed2ea18 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -85,7 +85,7 @@ static void __dma_direct_free_pages(struct device *dev, struct page *page,
}
static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
- gfp_t gfp)
+ gfp_t gfp, bool allow_highmem)
{
int node = dev_to_node(dev);
struct page *page = NULL;
@@ -106,9 +106,12 @@ static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
}
page = dma_alloc_contiguous(dev, size, gfp);
- if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
- dma_free_contiguous(dev, page, size);
- page = NULL;
+ if (page) {
+ if (!dma_coherent_ok(dev, page_to_phys(page), size) ||
+ (!allow_highmem && PageHighMem(page))) {
+ dma_free_contiguous(dev, page, size);
+ page = NULL;
+ }
}
again:
if (!page)
@@ -154,7 +157,7 @@ static void *dma_direct_alloc_no_mapping(struct device *dev, size_t size,
{
struct page *page;
- page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO);
+ page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO, true);
if (!page)
return NULL;
@@ -209,7 +212,7 @@ void *dma_direct_alloc(struct device *dev, size_t size,
return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);
/* we always manually zero the memory once we are done */
- page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO);
+ page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO, true);
if (!page)
return NULL;
@@ -335,19 +338,9 @@ struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
!is_swiotlb_for_alloc(dev))
return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);
- page = __dma_direct_alloc_pages(dev, size, gfp);
+ page = __dma_direct_alloc_pages(dev, size, gfp, false);
if (!page)
return NULL;
- if (PageHighMem(page)) {
- /*
- * Depending on the cma= arguments and per-arch setup
- * dma_alloc_contiguous could return highmem pages.
- * Without remapping there is no way to return them here,
- * so log an error and fail.
- */
- dev_info(dev, "Rejecting highmem page from CMA.\n");
- goto out_free_pages;
- }
ret = page_address(page);
if (force_dma_unencrypted(dev)) {
--
2.35.1