Message-Id: <20160301234501.596256496@linuxfoundation.org>
Date: Tue, 01 Mar 2016 23:51:08 +0000
From: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
To: <linux-kernel@...r.kernel.org>
Cc: Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
<stable@...r.kernel.org>, Dan Williams <dan.j.williams@...el.com>,
NeilBrown <neilb@...e.com>, Vinod Koul <vinod.koul@...el.com>
Subject: [PATCH 3.14 052/130] async_tx: use GFP_NOWAIT rather than GFP_NOIO
3.14-stable review patch. If anyone has any objections, please let me know.
------------------
From: NeilBrown <neilb@...e.com>
commit b02bab6b0f928d49dbfb03e1e4e9dd43647623d7 upstream.
These async_XX functions are called from md/raid5 in an atomic
section, between get_cpu() and put_cpu(), so they must not sleep.
So use GFP_NOWAIT rather than GFP_NOIO.
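To make the constraint concrete, here is a minimal, hypothetical
sketch (the caller name and the unmap count of 2 are illustrative,
not taken from md/raid5) of the pattern described above: between
get_cpu() and put_cpu() preemption is disabled, so any allocation
in that window must use a flag that can never sleep.

#include <linux/dmaengine.h>
#include <linux/printk.h>
#include <linux/smp.h>

static void raid5_like_caller(struct dma_device *device)
{
	struct dmaengine_unmap_data *unmap;
	int cpu = get_cpu();	/* disables preemption: sleeping is now a bug */

	pr_debug("issuing async op on cpu %d\n", cpu);

	/*
	 * GFP_NOWAIT fails fast instead of sleeping; GFP_NOIO can still
	 * block in direct reclaim, which is not allowed in this section.
	 */
	unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOWAIT);
	if (unmap) {
		/* ... map the pages and submit the DMA descriptor ... */
		dmaengine_unmap_put(unmap);
	}

	put_cpu();		/* re-enables preemption */
}

A failed GFP_NOWAIT allocation is tolerable here: every call site in
the diff below checks for a NULL unmap and falls back to the
synchronous (non-DMA) path.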
Dan Williams writes: Longer term async_tx needs to be merged into md
directly as we can allocate this unmap data statically per-stripe
rather than per request.
Fixes: 7476bd79fc01 ("async_pq: convert to dmaengine_unmap_data")
Reported-and-tested-by: Stanislav Samsonov <slava@...apurnalabs.com>
Acked-by: Dan Williams <dan.j.williams@...el.com>
Signed-off-by: NeilBrown <neilb@...e.com>
Signed-off-by: Vinod Koul <vinod.koul@...el.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
---
crypto/async_tx/async_memcpy.c | 2 +-
crypto/async_tx/async_pq.c | 4 ++--
crypto/async_tx/async_raid6_recov.c | 4 ++--
crypto/async_tx/async_xor.c | 4 ++--
4 files changed, 7 insertions(+), 7 deletions(-)
--- a/crypto/async_tx/async_memcpy.c
+++ b/crypto/async_tx/async_memcpy.c
@@ -53,7 +53,7 @@ async_memcpy(struct page *dest, struct p
 	struct dmaengine_unmap_data *unmap = NULL;
 
 	if (device)
-		unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOIO);
+		unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOWAIT);
 
 	if (unmap && is_dma_copy_aligned(device, src_offset, dest_offset, len)) {
 		unsigned long dma_prep_flags = 0;
--- a/crypto/async_tx/async_pq.c
+++ b/crypto/async_tx/async_pq.c
@@ -176,7 +176,7 @@ async_gen_syndrome(struct page **blocks,
 	BUG_ON(disks > 255 || !(P(blocks, disks) || Q(blocks, disks)));
 
 	if (device)
-		unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOIO);
+		unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOWAIT);
 
 	if (unmap &&
 	    (src_cnt <= dma_maxpq(device, 0) ||
@@ -294,7 +294,7 @@ async_syndrome_val(struct page **blocks,
 	BUG_ON(disks < 4);
 
 	if (device)
-		unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOIO);
+		unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOWAIT);
 
 	if (unmap && disks <= dma_maxpq(device, 0) &&
 	    is_dma_pq_aligned(device, offset, 0, len)) {
--- a/crypto/async_tx/async_raid6_recov.c
+++ b/crypto/async_tx/async_raid6_recov.c
@@ -41,7 +41,7 @@ async_sum_product(struct page *dest, str
 	u8 *a, *b, *c;
 
 	if (dma)
-		unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOIO);
+		unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOWAIT);
 
 	if (unmap) {
 		struct device *dev = dma->dev;
@@ -105,7 +105,7 @@ async_mult(struct page *dest, struct pag
 	u8 *d, *s;
 
 	if (dma)
-		unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOIO);
+		unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOWAIT);
 
 	if (unmap) {
 		dma_addr_t dma_dest[2];
--- a/crypto/async_tx/async_xor.c
+++ b/crypto/async_tx/async_xor.c
@@ -182,7 +182,7 @@ async_xor(struct page *dest, struct page
 	BUG_ON(src_cnt <= 1);
 
 	if (device)
-		unmap = dmaengine_get_unmap_data(device->dev, src_cnt+1, GFP_NOIO);
+		unmap = dmaengine_get_unmap_data(device->dev, src_cnt+1, GFP_NOWAIT);
 
 	if (unmap && is_dma_xor_aligned(device, offset, 0, len)) {
 		struct dma_async_tx_descriptor *tx;
@@ -278,7 +278,7 @@ async_xor_val(struct page *dest, struct
 	BUG_ON(src_cnt <= 1);
 
 	if (device)
-		unmap = dmaengine_get_unmap_data(device->dev, src_cnt, GFP_NOIO);
+		unmap = dmaengine_get_unmap_data(device->dev, src_cnt, GFP_NOWAIT);
 
 	if (unmap && src_cnt <= device->max_xor &&
 	    is_dma_xor_aligned(device, offset, 0, len)) {