Message-Id: <20060728181629.5948.78653.stgit@dwillia2-linux.ch.intel.com>
Date: Fri, 28 Jul 2006 11:16:29 -0700
From: Dan Williams <dan.j.williams@...el.com>
To: davem@...emloft.net, linux-kernel@...r.kernel.org
Cc: neilb@...e.de, galak@...nel.crashing.org,
christopher.leech@...el.com, alan@...rguk.ukuu.org.uk,
dan.j.williams@...el.com
Subject: [PATCH rev2 3/4] dmaengine: expose per channel dma mapping characteristics to clients

From: Dan Williams <dan.j.williams@...el.com>

Allow a client to ensure that the DMA channel it has selected can DMA to
the specified buffer or page address. Also allow the client to pre-map
address ranges to be passed to the operations API.

version 2: make the dmaengine API EXPORT_SYMBOL_GPL

Signed-off-by: Dan Williams <dan.j.williams@...el.com>
---
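Not part of the patch, but for illustration, a minimal sketch of how a
client might use the new per-channel mapping helpers around an offloaded
copy. Only dma_async_map_single()/dma_async_unmap_single() and the
device_operation_complete() hook come from this series; example_issue_copy()
and the rest of the surrounding context are hypothetical stand-ins for an
operations-API call that accepts pre-mapped addresses:

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>

/* Hypothetical operations-API entry point that accepts pre-mapped
 * addresses; nothing with this name exists in the tree, it only stands
 * in for whatever operation the client wants to run. */
extern dma_cookie_t example_issue_copy(struct dma_chan *chan,
		dma_addr_t dest, dma_addr_t src, size_t len);

/* Sketch of a client: map both buffers through the selected channel,
 * submit the operation, poll for completion, then unmap. */
static int example_client_copy(struct dma_chan *chan, void *dest, void *src,
		size_t len)
{
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie, last, used;

	/* use the channel's own mapping ops so the resulting addresses
	 * are valid for the device backing this channel */
	dma_dest = dma_async_map_single(chan, dest, len, DMA_FROM_DEVICE);
	dma_src = dma_async_map_single(chan, src, len, DMA_TO_DEVICE);

	cookie = example_issue_copy(chan, dma_dest, dma_src, len);

	/* busy-wait for completion; a real client would sleep or use a
	 * callback instead */
	while (chan->device->device_operation_complete(chan, cookie, &last,
			&used) == DMA_IN_PROGRESS)
		cpu_relax();

	dma_async_unmap_single(chan, dma_src, len, DMA_TO_DEVICE);
	dma_async_unmap_single(chan, dma_dest, len, DMA_FROM_DEVICE);

	return 0;
}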
 drivers/dma/dmaengine.c   |    4 ++++
 drivers/dma/ioatdma.c     |   35 +++++++++++++++++++++++++++++++++++
 include/linux/dmaengine.h |   34 ++++++++++++++++++++++++++++++++++
 3 files changed, 73 insertions(+), 0 deletions(-)

diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 2aaf21c..ff01e3a 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -631,3 +631,7 @@ EXPORT_SYMBOL_GPL(dma_async_device_unreg
 EXPORT_SYMBOL_GPL(dma_chan_cleanup);
 EXPORT_SYMBOL_GPL(dma_async_do_xor_err);
 EXPORT_SYMBOL_GPL(dma_async_chan_init);
+EXPORT_SYMBOL_GPL(dma_async_map_page);
+EXPORT_SYMBOL_GPL(dma_async_map_single);
+EXPORT_SYMBOL_GPL(dma_async_unmap_page);
+EXPORT_SYMBOL_GPL(dma_async_unmap_single);
diff --git a/drivers/dma/ioatdma.c b/drivers/dma/ioatdma.c
index 511e8c1..c7bae96 100644
--- a/drivers/dma/ioatdma.c
+++ b/drivers/dma/ioatdma.c
@@ -638,6 +638,37 @@ extern dma_cookie_t dma_async_do_xor_err
 			unsigned int src_off, size_t len, u32 *result,
 			unsigned long flags);
 
+static dma_addr_t ioat_map_page(struct dma_chan *chan, struct page *page,
+				unsigned long offset, size_t size,
+				int direction)
+{
+	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
+	return pci_map_page(ioat_chan->device->pdev, page, offset, size,
+			direction);
+}
+
+static dma_addr_t ioat_map_single(struct dma_chan *chan, void *cpu_addr,
+				size_t size, int direction)
+{
+	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
+	return pci_map_single(ioat_chan->device->pdev, cpu_addr, size,
+			direction);
+}
+
+static void ioat_unmap_page(struct dma_chan *chan, dma_addr_t handle,
+			size_t size, int direction)
+{
+	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
+	pci_unmap_page(ioat_chan->device->pdev, handle, size, direction);
+}
+
+static void ioat_unmap_single(struct dma_chan *chan, dma_addr_t handle,
+			size_t size, int direction)
+{
+	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
+	pci_unmap_single(ioat_chan->device->pdev, handle, size, direction);
+}
+
 static int __devinit ioat_probe(struct pci_dev *pdev,
 				const struct pci_device_id *ent)
 {
@@ -718,6 +749,10 @@ #endif
 	device->common.capabilities = DMA_MEMCPY;
 	device->common.device_do_dma_memcpy = do_ioat_dma_memcpy;
 	device->common.device_do_dma_xor = dma_async_do_xor_err;
+	device->common.map_page = ioat_map_page;
+	device->common.map_single = ioat_map_single;
+	device->common.unmap_page = ioat_unmap_page;
+	device->common.unmap_single = ioat_unmap_single;
 
 	printk(KERN_INFO "Intel(R) I/OAT DMA Engine found, %d channels\n",
 		device->common.chancnt);
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index d325919..33699be 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -287,6 +287,15 @@ struct dma_device {
 	enum dma_status (*device_operation_complete)(struct dma_chan *chan,
 			dma_cookie_t cookie, dma_cookie_t *last,
 			dma_cookie_t *used);
+	dma_addr_t (*map_page)(struct dma_chan *chan, struct page *page,
+			unsigned long offset, size_t size,
+			int direction);
+	dma_addr_t (*map_single)(struct dma_chan *chan, void *cpu_addr,
+			size_t size, int direction);
+	void (*unmap_page)(struct dma_chan *chan, dma_addr_t handle,
+			size_t size, int direction);
+	void (*unmap_single)(struct dma_chan *chan, dma_addr_t handle,
+			size_t size, int direction);
 	void (*device_issue_pending)(struct dma_chan *chan);
 };
 
@@ -595,6 +604,31 @@ static inline enum dma_status dma_async_
 	return DMA_IN_PROGRESS;
 }
 
+static inline dma_addr_t dma_async_map_page(struct dma_chan *chan,
+		struct page *page, unsigned long offset, size_t size,
+		int direction)
+{
+	return chan->device->map_page(chan, page, offset, size, direction);
+}
+
+static inline dma_addr_t dma_async_map_single(struct dma_chan *chan,
+		void *cpu_addr, size_t size, int direction)
+{
+	return chan->device->map_single(chan, cpu_addr, size, direction);
+}
+
+static inline void dma_async_unmap_page(struct dma_chan *chan,
+		dma_addr_t handle, size_t size, int direction)
+{
+	chan->device->unmap_page(chan, handle, size, direction);
+}
+
+static inline void dma_async_unmap_single(struct dma_chan *chan,
+		dma_addr_t handle, size_t size, int direction)
+{
+	chan->device->unmap_single(chan, handle, size, direction);
+}
+
 /* --- DMA device --- */
 
 int dma_async_device_register(struct dma_device *device);