Message-Id: <20170921085922.11659-5-ganapatrao.kulkarni@cavium.com>
Date: Thu, 21 Sep 2017 14:29:22 +0530
From: Ganapatrao Kulkarni <ganapatrao.kulkarni@...ium.com>
To: linux-kernel@...r.kernel.org, linux-arm-kernel@...ts.infradead.org,
iommu@...ts.linux-foundation.org, linux-mm@...ck.org
Cc: Will.Deacon@....com, robin.murphy@....com,
lorenzo.pieralisi@....com, hanjun.guo@...aro.org, joro@...tes.org,
vbabka@...e.cz, akpm@...ux-foundation.org, mhocko@...e.com,
Tomasz.Nowicki@...ium.com, Robert.Richter@...ium.com,
jnair@...iumnetworks.com, gklkml16@...il.com
Subject: [PATCH 4/4] iommu/dma, numa: Use NUMA aware memory allocations in __iommu_dma_alloc_pages

Change __iommu_dma_alloc_pages() to allocate the pages used for DMA,
and the page-pointer array that tracks them, from the NUMA node of the
device instead of the local node of the allocating CPU.
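
For illustration only (not part of this patch), a minimal sketch of the
allocation pattern the hunks below apply, assuming a valid struct device;
the helper name alloc_page_near_dev() is hypothetical:

	#include <linux/device.h>
	#include <linux/gfp.h>

	/*
	 * Sketch: allocate a block of 2^order pages near 'dev'.
	 * dev_to_node() returns the device's NUMA node, or NUMA_NO_NODE
	 * when firmware provided no affinity; alloc_pages_node() treats
	 * NUMA_NO_NODE as "no preference" and falls back to the current
	 * node, so no explicit check is needed here.
	 */
	static struct page *alloc_page_near_dev(struct device *dev,
						gfp_t gfp, unsigned int order)
	{
		return alloc_pages_node(dev_to_node(dev), gfp, order);
	}

The same holds for kzalloc_node() and vzalloc_node(): both accept
NUMA_NO_NODE and then behave like plain kzalloc()/vzalloc().
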
Signed-off-by: Ganapatrao Kulkarni <ganapatrao.kulkarni@...ium.com>
---
drivers/iommu/dma-iommu.c | 17 ++++++++++-------
1 file changed, 10 insertions(+), 7 deletions(-)
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 9d1cebe..0626b58 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -428,20 +428,21 @@ static void __iommu_dma_free_pages(struct page **pages, int count)
kvfree(pages);
}
-static struct page **__iommu_dma_alloc_pages(unsigned int count,
- unsigned long order_mask, gfp_t gfp)
+static struct page **__iommu_dma_alloc_pages(struct device *dev,
+ unsigned int count, unsigned long order_mask, gfp_t gfp)
{
struct page **pages;
unsigned int i = 0, array_size = count * sizeof(*pages);
+ int numa_node = dev_to_node(dev);
order_mask &= (2U << MAX_ORDER) - 1;
if (!order_mask)
return NULL;
if (array_size <= PAGE_SIZE)
- pages = kzalloc(array_size, GFP_KERNEL);
+ pages = kzalloc_node(array_size, GFP_KERNEL, numa_node);
else
- pages = vzalloc(array_size);
+ pages = vzalloc_node(array_size, numa_node);
if (!pages)
return NULL;
@@ -462,8 +463,9 @@ static struct page **__iommu_dma_alloc_pages(unsigned int count,
unsigned int order = __fls(order_mask);
order_size = 1U << order;
- page = alloc_pages((order_mask - order_size) ?
- gfp | __GFP_NORETRY : gfp, order);
+ page = alloc_pages_node(numa_node,
+ (order_mask - order_size) ?
+ gfp | __GFP_NORETRY : gfp, order);
if (!page)
continue;
if (!order)
@@ -548,7 +550,8 @@ struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
alloc_sizes = min_size;
count = PAGE_ALIGN(size) >> PAGE_SHIFT;
- pages = __iommu_dma_alloc_pages(count, alloc_sizes >> PAGE_SHIFT, gfp);
+ pages = __iommu_dma_alloc_pages(dev, count, alloc_sizes >> PAGE_SHIFT,
+ gfp);
if (!pages)
return NULL;
--
2.9.4