[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20060728181634.5948.67248.stgit@dwillia2-linux.ch.intel.com>
Date: Fri, 28 Jul 2006 11:16:35 -0700
From: Dan Williams <dan.j.williams@...el.com>
To: davem@...emloft.net, linux-kernel@...r.kernel.org
Cc: neilb@...e.de, galak@...nel.crashing.org,
christopher.leech@...el.com, alan@...rguk.ukuu.org.uk,
dan.j.williams@...el.com
Subject: [PATCH rev2 4/4] dmaengine: add memset as an asynchronous dma operation
From: Dan Williams <dan.j.williams@...el.com>
version 2: make the dmaengine api EXPORT_SYMBOL_GPL
Signed-off-by: Dan Williams <dan.j.williams@...el.com>
---
drivers/dma/dmaengine.c | 15 ++++++++++
drivers/dma/ioatdma.c | 5 +++
include/linux/dmaengine.h | 68 +++++++++++++++++++++++++++++++++++++++++++++
3 files changed, 88 insertions(+), 0 deletions(-)
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index ff01e3a..3b1ac94 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -605,6 +605,17 @@ dma_cookie_t dma_async_do_xor_err(struct
return -ENXIO;
}
+/**
+ * dma_async_do_memset_err - default function for dma devices that
+ * do not support memset
+ */
+dma_cookie_t dma_async_do_memset_err(struct dma_chan *chan,
+ union dmaengine_addr dest, unsigned int dest_off,
+ int val, size_t len, unsigned long flags)
+{
+ return -ENXIO;
+}
+
static int __init dma_bus_init(void)
{
mutex_init(&dma_list_mutex);
@@ -622,6 +633,9 @@ EXPORT_SYMBOL_GPL(dma_async_memcpy_pg_to
EXPORT_SYMBOL_GPL(dma_async_memcpy_dma_to_dma);
EXPORT_SYMBOL_GPL(dma_async_memcpy_pg_to_dma);
EXPORT_SYMBOL_GPL(dma_async_memcpy_dma_to_pg);
+EXPORT_SYMBOL_GPL(dma_async_memset_buf);
+EXPORT_SYMBOL_GPL(dma_async_memset_page);
+EXPORT_SYMBOL_GPL(dma_async_memset_dma);
EXPORT_SYMBOL_GPL(dma_async_xor_pgs_to_pg);
EXPORT_SYMBOL_GPL(dma_async_xor_dma_list_to_dma);
EXPORT_SYMBOL_GPL(dma_async_operation_complete);
@@ -630,6 +644,7 @@ EXPORT_SYMBOL_GPL(dma_async_device_regis
EXPORT_SYMBOL_GPL(dma_async_device_unregister);
EXPORT_SYMBOL_GPL(dma_chan_cleanup);
EXPORT_SYMBOL_GPL(dma_async_do_xor_err);
+EXPORT_SYMBOL_GPL(dma_async_do_memset_err);
EXPORT_SYMBOL_GPL(dma_async_chan_init);
EXPORT_SYMBOL_GPL(dma_async_map_page);
EXPORT_SYMBOL_GPL(dma_async_map_single);
diff --git a/drivers/dma/ioatdma.c b/drivers/dma/ioatdma.c
index c7bae96..5133e3d 100644
--- a/drivers/dma/ioatdma.c
+++ b/drivers/dma/ioatdma.c
@@ -638,6 +638,10 @@ extern dma_cookie_t dma_async_do_xor_err
unsigned int src_off, size_t len, u32 *result,
unsigned long flags);
+extern dma_cookie_t dma_async_do_memset_err(struct dma_chan *chan,
+ union dmaengine_addr dest, unsigned int dest_off,
+ int val, size_t size, unsigned long flags);
+
static dma_addr_t ioat_map_page(struct dma_chan *chan, struct page *page,
unsigned long offset, size_t size,
int direction)
@@ -749,6 +753,7 @@ #endif
device->common.capabilities = DMA_MEMCPY;
device->common.device_do_dma_memcpy = do_ioat_dma_memcpy;
device->common.device_do_dma_xor = dma_async_do_xor_err;
+ device->common.device_do_dma_memset = dma_async_do_memset_err;
device->common.map_page = ioat_map_page;
device->common.map_single = ioat_map_single;
device->common.unmap_page = ioat_unmap_page;
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 33699be..02c09fa 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -260,6 +260,7 @@ struct dma_chan_client_ref {
* @device_issue_pending: push appended descriptors to hardware
* @device_do_dma_memcpy: perform memcpy with a dma engine
* @device_do_dma_xor: perform block xor with a dma engine
+ * @device_do_dma_memset: perform block fill with a dma engine
*/
struct dma_device {
@@ -284,6 +285,9 @@ struct dma_device {
union dmaengine_addr src, unsigned int src_cnt,
unsigned int src_off, size_t len, u32 *result,
unsigned long flags);
+ dma_cookie_t (*device_do_dma_memset)(struct dma_chan *chan,
+ union dmaengine_addr dest, unsigned int dest_off,
+ int value, size_t len, unsigned long flags);
enum dma_status (*device_operation_complete)(struct dma_chan *chan,
dma_cookie_t cookie, dma_cookie_t *last,
dma_cookie_t *used);
@@ -478,6 +482,70 @@ static inline dma_cookie_t dma_async_mem
}
/**
+ * dma_async_memset_buf - offloaded memset
+ * @chan: DMA channel to offload memset to
+ * @buf: destination buffer
+ * @val: value to initialize the buffer
+ * @len: length
+ */
+static inline dma_cookie_t dma_async_memset_buf(struct dma_chan *chan,
+ void *buf, int val, size_t len)
+{
+ unsigned long flags = DMA_DEST_BUF;
+ union dmaengine_addr dest_addr = { .buf = buf };
+ int cpu = get_cpu();
+ per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
+ per_cpu_ptr(chan->local, cpu)->memcpy_count++;
+ put_cpu();
+
+ return chan->device->device_do_dma_memset(chan, dest_addr, 0, val,
+ len, flags);
+}
+
+/**
+ * dma_async_memset_page - offloaded memset
+ * @chan: DMA channel to offload memset to
+ * @page: destination page
+ * @offset: offset into the destination
+ * @val: value to initialize the buffer
+ * @len: length
+ */
+static inline dma_cookie_t dma_async_memset_page(struct dma_chan *chan,
+ struct page *page, unsigned int offset, int val, size_t len)
+{
+ unsigned long flags = DMA_DEST_PAGE;
+ union dmaengine_addr dest_addr = { .pg = page };
+ int cpu = get_cpu();
+ per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
+ per_cpu_ptr(chan->local, cpu)->memcpy_count++;
+ put_cpu();
+
+ return chan->device->device_do_dma_memset(chan, dest_addr, offset, val,
+ len, flags);
+}
+
+/**
+ * dma_async_memset_dma - offloaded memset
+ * @chan: DMA channel to offload memset to
+ * @dma: destination dma address
+ * @val: value to initialize the buffer
+ * @len: length
+ */
+static inline dma_cookie_t dma_async_memset_dma(struct dma_chan *chan,
+ dma_addr_t dma, int val, size_t len)
+{
+ unsigned long flags = DMA_DEST_DMA;
+ union dmaengine_addr dest_addr = { .dma = dma };
+ int cpu = get_cpu();
+ per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
+ per_cpu_ptr(chan->local, cpu)->memcpy_count++;
+ put_cpu();
+
+ return chan->device->device_do_dma_memset(chan, dest_addr, 0, val,
+ len, flags);
+}
+
+/**
* dma_async_xor_pgs_to_pg - offloaded xor from pages to page
* @chan: DMA channel to offload xor to
* @dest_page: destination page
-
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@...r.kernel.org
More majordomo info at http://vger.kernel.org/majordomo-info.html
Please read the FAQ at http://www.tux.org/lkml/
Powered by blists - more mailing lists