Message-Id: <20221113163535.884299-4-hch@lst.de>
Date: Sun, 13 Nov 2022 17:35:31 +0100
From: Christoph Hellwig <hch@....de>
To: Dennis Dalessandro <dennis.dalessandro@...nelisnetworks.com>,
Mauro Carvalho Chehab <mchehab@...nel.org>,
Alexandra Winter <wintera@...ux.ibm.com>,
Wenjia Zhang <wenjia@...ux.ibm.com>,
Marek Szyprowski <m.szyprowski@...sung.com>,
Jaroslav Kysela <perex@...ex.cz>,
Takashi Iwai <tiwai@...e.com>,
Russell King <linux@...linux.org.uk>,
Robin Murphy <robin.murphy@....com>
Cc: linux-arm-kernel@...ts.infradead.org, linux-rdma@...r.kernel.org,
iommu@...ts.linux.dev, linux-media@...r.kernel.org,
netdev@...r.kernel.org, linux-s390@...r.kernel.org,
alsa-devel@...a-project.org
Subject: [PATCH 3/7] RDMA/qib: don't pass bogus GFP_ flags to dma_alloc_coherent
dma_alloc_coherent is an opaque allocator that only uses the GFP_ flags
for allocation context control.  Don't pass GFP_USER, which doesn't make
sense for a kernel DMA allocation, or __GFP_COMP, which makes no sense
for an allocation that can't in any way be converted to a page pointer.
Signed-off-by: Christoph Hellwig <hch@....de>
---
drivers/infiniband/hw/qib/qib_iba6120.c | 2 +-
drivers/infiniband/hw/qib/qib_init.c | 21 ++++-----------------
2 files changed, 5 insertions(+), 18 deletions(-)
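[Not part of the patch itself: a minimal sketch of the pattern the callers
are converted to, with made-up struct and function names purely for
illustration.  dma_alloc_coherent() hands back a kernel virtual address
plus a dma_addr_t, so only allocation-context flags such as GFP_KERNEL or
GFP_ATOMIC are meaningful to it.]

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/gfp.h>

/* Illustrative only; not a structure from the qib driver. */
struct example_ring {
	void		*buf;		/* kernel virtual address */
	dma_addr_t	buf_dma;	/* device-visible DMA address */
	size_t		size;
};

static int example_ring_alloc(struct device *dev, struct example_ring *ring,
			      size_t size)
{
	/*
	 * Plain GFP_KERNEL (or GFP_ATOMIC in atomic context) is all that
	 * matters here; GFP_USER and __GFP_COMP are meaningless for an
	 * opaque coherent allocation.
	 */
	ring->buf = dma_alloc_coherent(dev, size, &ring->buf_dma, GFP_KERNEL);
	if (!ring->buf)
		return -ENOMEM;
	ring->size = size;
	return 0;
}

static void example_ring_free(struct device *dev, struct example_ring *ring)
{
	dma_free_coherent(dev, ring->size, ring->buf, ring->buf_dma);
}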
diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c
index aea571943768b..07386117f21ad 100644
--- a/drivers/infiniband/hw/qib/qib_iba6120.c
+++ b/drivers/infiniband/hw/qib/qib_iba6120.c
@@ -2075,7 +2075,7 @@ static void alloc_dummy_hdrq(struct qib_devdata *dd)
dd->cspec->dummy_hdrq = dma_alloc_coherent(&dd->pcidev->dev,
dd->rcd[0]->rcvhdrq_size,
&dd->cspec->dummy_hdrq_phys,
- GFP_ATOMIC | __GFP_COMP);
+ GFP_ATOMIC);
if (!dd->cspec->dummy_hdrq) {
qib_devinfo(dd->pcidev, "Couldn't allocate dummy hdrq\n");
/* fallback to just 0'ing */
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
index 45211008449fb..33667becd52b0 100644
--- a/drivers/infiniband/hw/qib/qib_init.c
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -1546,18 +1546,14 @@ int qib_create_rcvhdrq(struct qib_devdata *dd, struct qib_ctxtdata *rcd)
if (!rcd->rcvhdrq) {
dma_addr_t phys_hdrqtail;
- gfp_t gfp_flags;
amt = ALIGN(dd->rcvhdrcnt * dd->rcvhdrentsize *
sizeof(u32), PAGE_SIZE);
- gfp_flags = (rcd->ctxt >= dd->first_user_ctxt) ?
- GFP_USER : GFP_KERNEL;
old_node_id = dev_to_node(&dd->pcidev->dev);
set_dev_node(&dd->pcidev->dev, rcd->node_id);
- rcd->rcvhdrq = dma_alloc_coherent(
- &dd->pcidev->dev, amt, &rcd->rcvhdrq_phys,
- gfp_flags | __GFP_COMP);
+ rcd->rcvhdrq = dma_alloc_coherent(&dd->pcidev->dev, amt,
+ &rcd->rcvhdrq_phys, GFP_KERNEL);
set_dev_node(&dd->pcidev->dev, old_node_id);
if (!rcd->rcvhdrq) {
@@ -1577,7 +1573,7 @@ int qib_create_rcvhdrq(struct qib_devdata *dd, struct qib_ctxtdata *rcd)
set_dev_node(&dd->pcidev->dev, rcd->node_id);
rcd->rcvhdrtail_kvaddr = dma_alloc_coherent(
&dd->pcidev->dev, PAGE_SIZE, &phys_hdrqtail,
- gfp_flags);
+ GFP_KERNEL);
set_dev_node(&dd->pcidev->dev, old_node_id);
if (!rcd->rcvhdrtail_kvaddr)
goto bail_free;
@@ -1621,17 +1617,8 @@ int qib_setup_eagerbufs(struct qib_ctxtdata *rcd)
struct qib_devdata *dd = rcd->dd;
unsigned e, egrcnt, egrperchunk, chunk, egrsize, egroff;
size_t size;
- gfp_t gfp_flags;
int old_node_id;
- /*
- * GFP_USER, but without GFP_FS, so buffer cache can be
- * coalesced (we hope); otherwise, even at order 4,
- * heavy filesystem activity makes these fail, and we can
- * use compound pages.
- */
- gfp_flags = __GFP_RECLAIM | __GFP_IO | __GFP_COMP;
-
egrcnt = rcd->rcvegrcnt;
egroff = rcd->rcvegr_tid_base;
egrsize = dd->rcvegrbufsize;
@@ -1663,7 +1650,7 @@ int qib_setup_eagerbufs(struct qib_ctxtdata *rcd)
rcd->rcvegrbuf[e] =
dma_alloc_coherent(&dd->pcidev->dev, size,
&rcd->rcvegrbuf_phys[e],
- gfp_flags);
+ GFP_KERNEL);
set_dev_node(&dd->pcidev->dev, old_node_id);
if (!rcd->rcvegrbuf[e])
goto bail_rcvegrbuf_phys;
--
2.30.2