Message-Id: <1439363150-8661-23-git-send-email-hch@lst.de>
Date: Wed, 12 Aug 2015 09:05:41 +0200
From: Christoph Hellwig <hch@....de>
To: torvalds@...ux-foundation.org, axboe@...nel.dk
Cc: dan.j.williams@...el.com, vgupta@...opsys.com,
hskinnemoen@...il.com, egtvedt@...fundet.no, realmz6@...il.com,
dhowells@...hat.com, monstr@...str.eu, x86@...nel.org,
dwmw2@...radead.org, alex.williamson@...hat.com,
grundler@...isc-linux.org, linux-kernel@...r.kernel.org,
linux-arch@...r.kernel.org, linux-alpha@...r.kernel.org,
linux-ia64@...r.kernel.org, linux-metag@...r.kernel.org,
linux-mips@...ux-mips.org, linux-parisc@...r.kernel.org,
linuxppc-dev@...ts.ozlabs.org, linux-s390@...r.kernel.org,
sparclinux@...r.kernel.org, linux-xtensa@...ux-xtensa.org,
linux-nvdimm@...1.01.org, linux-media@...r.kernel.org
Subject: [PATCH 22/31] metag: handle page-less SG entries

Make all cache invalidation conditional on sg_has_page().

Signed-off-by: Christoph Hellwig <hch@....de>
---
 arch/metag/include/asm/dma-mapping.h | 22 ++++++++++++----------
 1 file changed, 12 insertions(+), 10 deletions(-)

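A minimal sketch of the pattern every hunk below applies (the function name is
invented for illustration; sg_has_page() is the helper added earlier in this
series, and dma_sync_for_device() is the metag-internal cache op already used
in this file): a page-less entry still gets its bus address from sg_phys(),
but the virtual-address-based cache maintenance is skipped instead of hitting
a BUG_ON.

/*
 * Illustrative only -- not part of the patch.  sketch_map_sg() is a made-up
 * name; sg_has_page() comes from earlier in this series, and
 * dma_sync_for_device() is metag's internal cache-maintenance routine from
 * this header.
 */
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>

static int sketch_map_sg(struct scatterlist *sglist, int nents,
			 enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sglist, sg, nents, i) {
		/* A page-less entry still gets a DMA address ... */
		sg->dma_address = sg_phys(sg);
		/*
		 * ... but it has no kernel virtual mapping, so the cache
		 * maintenance (which needs sg_virt()) is skipped rather
		 * than tripping the old BUG_ON(!sg_page(sg)).
		 */
		if (sg_has_page(sg))
			dma_sync_for_device(sg_virt(sg), sg->length,
					    direction);
	}
	return nents;
}
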
diff --git a/arch/metag/include/asm/dma-mapping.h b/arch/metag/include/asm/dma-mapping.h
index eb5cdec..2ae9057 100644
--- a/arch/metag/include/asm/dma-mapping.h
+++ b/arch/metag/include/asm/dma-mapping.h
@@ -55,10 +55,9 @@ dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
 	WARN_ON(nents == 0 || sglist[0].length == 0);
 
 	for_each_sg(sglist, sg, nents, i) {
-		BUG_ON(!sg_page(sg));
-
 		sg->dma_address = sg_phys(sg);
-		dma_sync_for_device(sg_virt(sg), sg->length, direction);
+		if (sg_has_page(sg))
+			dma_sync_for_device(sg_virt(sg), sg->length, direction);
 	}
 
 	return nents;
@@ -94,10 +93,9 @@ dma_unmap_sg(struct device *dev, struct scatterlist *sglist, int nhwentries,
 	WARN_ON(nhwentries == 0 || sglist[0].length == 0);
 
 	for_each_sg(sglist, sg, nhwentries, i) {
-		BUG_ON(!sg_page(sg));
-
 		sg->dma_address = sg_phys(sg);
-		dma_sync_for_cpu(sg_virt(sg), sg->length, direction);
+		if (sg_has_page(sg))
+			dma_sync_for_cpu(sg_virt(sg), sg->length, direction);
 	}
 }
 
@@ -140,8 +138,10 @@ dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist, int nelems,
 	int i;
 	struct scatterlist *sg;
 
-	for_each_sg(sglist, sg, nelems, i)
-		dma_sync_for_cpu(sg_virt(sg), sg->length, direction);
+	for_each_sg(sglist, sg, nelems, i) {
+		if (sg_has_page(sg))
+			dma_sync_for_cpu(sg_virt(sg), sg->length, direction);
+	}
 }
 
 static inline void
@@ -151,8 +151,10 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist,
 	int i;
 	struct scatterlist *sg;
 
-	for_each_sg(sglist, sg, nelems, i)
-		dma_sync_for_device(sg_virt(sg), sg->length, direction);
+	for_each_sg(sglist, sg, nelems, i) {
+		if (sg_has_page(sg))
+			dma_sync_for_device(sg_virt(sg), sg->length, direction);
+	}
 }
 
 static inline int
--
1.9.1