lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20220219005221.634-23-bhe@redhat.com>
Date:   Sat, 19 Feb 2022 08:52:21 +0800
From:   Baoquan He <bhe@...hat.com>
To:     linux-kernel@...r.kernel.org
Cc:     linux-mm@...ck.org, akpm@...ux-foundation.org, hch@....de,
        cl@...ux.com, 42.hyeyoo@...il.com, penberg@...nel.org,
        rientjes@...gle.com, iamjoonsoo.kim@....com, vbabka@...e.cz,
        David.Laight@...LAB.COM, david@...hat.com,
        herbert@...dor.apana.org.au, davem@...emloft.net,
        linux-crypto@...r.kernel.org, steffen.klassert@...unet.com,
        netdev@...r.kernel.org, hca@...ux.ibm.com, gor@...ux.ibm.com,
        agordeev@...ux.ibm.com, borntraeger@...ux.ibm.com,
        svens@...ux.ibm.com, linux-s390@...r.kernel.org, michael@...le.cc,
        linux-i2c@...r.kernel.org, wsa@...nel.org
Subject: [PATCH 22/22] mtd: rawnand: Use dma_alloc_noncoherent() for dma buffer

Use dma_alloc_noncoherent() instead of directly allocating the buffer
from kmalloc with GFP_DMA. The DMA API will then allocate a buffer
that satisfies the device's addressing limitations.

[ 42.hyeyoo@...il.com: Use dma_alloc_noncoherent() instead of
  __get_free_page() and update changelog.

  Since this no longer keeps a high-order buffer allocated for the
  device's lifetime, allocate the buffer when needed and free it
  after the DMA transfer completes. ]

Signed-off-by: Baoquan He <bhe@...hat.com>
Signed-off-by: Hyeonggon Yoo <42.hyeyoo@...il.com>
Cc: Miquel Raynal <miquel.raynal@...tlin.com>
Cc: Richard Weinberger <richard@....at>
Cc: Vignesh Raghavendra <vigneshr@...com>
Cc: Sumit Semwal <sumit.semwal@...aro.org>
Cc: christian.koenig@....com
Cc: linux-mtd@...ts.infradead.org

---
 drivers/mtd/nand/raw/marvell_nand.c | 55 ++++++++++++++++++-----------
 1 file changed, 34 insertions(+), 21 deletions(-)

diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c
index 2455a581fd70..c0b64a7e50af 100644
--- a/drivers/mtd/nand/raw/marvell_nand.c
+++ b/drivers/mtd/nand/raw/marvell_nand.c
@@ -860,26 +860,45 @@ static int marvell_nfc_xfer_data_dma(struct marvell_nfc *nfc,
 	struct dma_async_tx_descriptor *tx;
 	struct scatterlist sg;
 	dma_cookie_t cookie;
-	int ret;
+	dma_addr_t dma_handle;
+	int ret = 0;
 
 	marvell_nfc_enable_dma(nfc);
+
+	/*
+	 * DMA must act on length multiple of 32 and this length may be
+	 * bigger than the destination buffer. Use this buffer instead
+	 * for DMA transfers and then copy the desired amount of data to
+	 * the provided buffer.
+	 */
+	nfc->dma_buf = dma_alloc_noncoherent(nfc->dev, MAX_CHUNK_SIZE,
+						&dma_handle,
+						direction,
+						GFP_ATOMIC);
+	if (!nfc->dma_buf) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+
 	/* Prepare the DMA transfer */
-	sg_init_one(&sg, nfc->dma_buf, dma_len);
-	dma_map_sg(nfc->dma_chan->device->dev, &sg, 1, direction);
-	tx = dmaengine_prep_slave_sg(nfc->dma_chan, &sg, 1,
+	tx = dmaengine_prep_slave_single(nfc->dma_chan, dma_handle, dma_len,
 				     direction == DMA_FROM_DEVICE ?
 				     DMA_DEV_TO_MEM : DMA_MEM_TO_DEV,
 				     DMA_PREP_INTERRUPT);
 	if (!tx) {
 		dev_err(nfc->dev, "Could not prepare DMA S/G list\n");
-		return -ENXIO;
+		ret = -ENXIO;
+		goto free;
 	}
 
 	/* Do the task and wait for it to finish */
 	cookie = dmaengine_submit(tx);
 	ret = dma_submit_error(cookie);
-	if (ret)
-		return -EIO;
+	if (ret) {
+		ret = -EIO;
+		goto free;
+	}
 
 	dma_async_issue_pending(nfc->dma_chan);
 	ret = marvell_nfc_wait_cmdd(nfc->selected_chip);
@@ -889,10 +908,16 @@ static int marvell_nfc_xfer_data_dma(struct marvell_nfc *nfc,
 		dev_err(nfc->dev, "Timeout waiting for DMA (status: %d)\n",
 			dmaengine_tx_status(nfc->dma_chan, cookie, NULL));
 		dmaengine_terminate_all(nfc->dma_chan);
-		return -ETIMEDOUT;
+		ret = -ETIMEDOUT;
+		goto free;
 	}
 
-	return 0;
+free:
+	dma_free_noncoherent(nfc->dev, MAX_CHUNK_SIZE, nfc->dma_buf,
+			     dma_handle, direction);
+
+out:
+	return ret;
 }
 
 static int marvell_nfc_xfer_data_in_pio(struct marvell_nfc *nfc, u8 *in,
@@ -2814,18 +2839,6 @@ static int marvell_nfc_init_dma(struct marvell_nfc *nfc)
 		goto release_channel;
 	}
 
-	/*
-	 * DMA must act on length multiple of 32 and this length may be
-	 * bigger than the destination buffer. Use this buffer instead
-	 * for DMA transfers and then copy the desired amount of data to
-	 * the provided buffer.
-	 */
-	nfc->dma_buf = kmalloc(MAX_CHUNK_SIZE, GFP_KERNEL | GFP_DMA);
-	if (!nfc->dma_buf) {
-		ret = -ENOMEM;
-		goto release_channel;
-	}
-
 	nfc->use_dma = true;
 
 	return 0;
-- 
2.17.2

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ