Message-Id: <1335189109-4871-9-git-send-email-jiang.liu@huawei.com>
Date: Mon, 23 Apr 2012 21:51:49 +0800
From: Jiang Liu <liuj97@...il.com>
To: Vinod Koul <vinod.koul@...el.com>,
Dan Williams <dan.j.williams@...el.com>
Cc: Jiang Liu <jiang.liu@...wei.com>,
Keping Chen <chenkeping@...wei.com>, linux-pci@...r.kernel.org,
linux-kernel@...r.kernel.org, Jiang Liu <liuj97@...il.com>
Subject: [RFC PATCH v1 8/8] dmaengine: assign DMA channel to CPU according to NUMA affinity

On systems with multiple CPUs and DMA devices, try to optimize DMA
performance by assigning DMA channels to CPUs according to their NUMA
affinity. This may help architectures with memory controllers and
DMA devices built into physical processors.
Signed-off-by: Jiang Liu <liuj97@...il.com>
---
drivers/dma/dmaengine.c | 45 +++++++++++++++++++++++++++++++++++++++++++--
1 files changed, 43 insertions(+), 2 deletions(-)
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 1cb91df..52d748c 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -289,6 +289,7 @@ static dma_cap_mask_t dma_cap_mask_all;
 struct dma_chan_tbl_ent {
 	struct dma_chan dma_chan_rcu_space *chan;
 	struct dma_chan *prev_chan;
+	int node;
 };
 
 /**
@@ -481,6 +482,46 @@ static struct dma_chan *nth_chan(enum dma_transaction_type cap, int n)
 	return ret;
 }
 
+/* Assign DMA channels to CPUs according to NUMA affinity relationship */
+static void dma_channel_set(int cap, int cpu, struct dma_chan *chan)
+{
+	int node;
+	int src_cpu;
+	struct dma_chan *src_chan;
+	struct dma_chan_tbl_ent *entry;
+	struct dma_chan_tbl_ent *src_entry;
+
+	entry = per_cpu_ptr(channel_table[cap], cpu);
+	node = dev_to_node(chan->device->dev);
+
+	/* Try to optimize if CPU and DMA channel belong to different nodes. */
+	if (node != -1 && node != cpu_to_node(cpu)) {
+		for_each_online_cpu(src_cpu) {
+			src_entry = per_cpu_ptr(channel_table[cap], src_cpu);
+			src_chan = dma_chan_rcu_get(src_entry->chan);
+
+			/*
+			 * CPU online map may change beneath us due to
+			 * CPU hotplug operations.
+			 */
+			if (src_chan == NULL)
+				continue;
+
+			if (src_entry->node == node ||
+			    cpu_to_node(src_cpu) == node) {
+				entry->node = src_entry->node;
+				src_entry->node = node;
+				dma_chan_rcu_set(entry->chan, src_chan);
+				dma_chan_rcu_set(src_entry->chan, chan);
+				return;
+			}
+		}
+	}
+
+	entry->node = node;
+	dma_chan_rcu_set(entry->chan, chan);
+}
+
 /**
  * dma_channel_rebalance - redistribute the available channels
  *
@@ -515,8 +556,8 @@ static void dma_channel_rebalance(void)
 				chan = nth_chan(cap, n++);
 			else
 				chan = nth_chan(cap, -1);
-			entry = per_cpu_ptr(channel_table[cap], cpu);
-			dma_chan_rcu_set(entry->chan, chan);
+			if (chan)
+				dma_channel_set(cap, cpu, chan);
 		}
 
 #ifdef CONFIG_DMA_ENGINE_HOTPLUG
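
To see the swap in dma_channel_set() at work, consider a hypothetical
two-node box: CPU 0 (node 0) is first handed a channel homed on node 1;
when a node-0 channel is later installed for a node-1 CPU, the function
finds CPU 0's slot, exchanges the two channels, and both CPUs end up
node-local. The toy model below replays that exchange in plain userspace
C; all types and the CPU-to-node mapping are made up for illustration,
and the RCU and hotplug details of the real code are omitted:

#include <stdio.h>

/* Toy stand-ins for the kernel structures, for illustration only. */
struct toy_chan {
	int node;			/* home node of the channel */
};

struct toy_ent {
	struct toy_chan *chan;		/* channel assigned to this CPU */
	int node;			/* node recorded for that channel */
};

/* Hypothetical CPU-to-node map: CPUs 0-1 on node 0, CPUs 2-3 on node 1. */
static int node_of_cpu(int cpu)
{
	return cpu < 2 ? 0 : 1;
}

/* Simplified replay of dma_channel_set(): locking/RCU details omitted. */
static void toy_channel_set(struct toy_ent *tbl, int ncpus, int cpu,
			    struct toy_chan *chan)
{
	int node = chan->node;

	/* Try to swap only when CPU and channel live on different nodes. */
	if (node != -1 && node != node_of_cpu(cpu)) {
		for (int src = 0; src < ncpus; src++) {
			if (tbl[src].chan == NULL)
				continue;
			if (tbl[src].node == node || node_of_cpu(src) == node) {
				/* exchange channels so both CPUs gain locality */
				struct toy_chan *old = tbl[src].chan;
				int old_node = tbl[src].node;

				tbl[src].chan = chan;
				tbl[src].node = node;
				tbl[cpu].chan = old;
				tbl[cpu].node = old_node;
				return;
			}
		}
	}

	tbl[cpu].chan = chan;
	tbl[cpu].node = node;
}

int main(void)
{
	struct toy_chan remote = { .node = 1 };	/* channel homed on node 1 */
	struct toy_chan local  = { .node = 0 };	/* channel homed on node 0 */
	struct toy_ent tbl[4] = { { NULL, 0 } };

	toy_channel_set(tbl, 4, 0, &remote);	/* no swap partner yet */
	toy_channel_set(tbl, 4, 2, &local);	/* swaps with CPU 0 */

	printf("cpu0 -> channel on node %d\n", tbl[0].chan->node);
	printf("cpu2 -> channel on node %d\n", tbl[2].chan->node);
	return 0;
}

Run, this prints "cpu0 -> channel on node 0" and "cpu2 -> channel on
node 1": after the exchange each CPU holds a channel from its own node.
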
--
1.7.5.4