Message-Id: <1337003229-9158-8-git-send-email-jiang.liu@huawei.com>
Date: Mon, 14 May 2012 21:47:09 +0800
From: Jiang Liu <liuj97@...il.com>
To: Dan Williams <dan.j.williams@...el.com>,
Maciej Sosnowski <maciej.sosnowski@...el.com>,
Vinod Koul <vinod.koul@...el.com>
Cc: Jiang Liu <jiang.liu@...wei.com>,
Keping Chen <chenkeping@...wei.com>,
linux-kernel@...r.kernel.org, linux-pci@...r.kernel.org,
Jiang Liu <liuj97@...il.com>
Subject: [RFC PATCH v2 7/7] dmaengine: assign DMA channel to CPU according to NUMA affinity
From: Jiang Liu <jiang.liu@...wei.com>
On systems with multiple CPUs and DMA devices, try to optimize DMA
performance by assigning DMA channels to CPUs according to their NUMA
affinity. This may help architectures whose memory controllers and
DMA devices are built into the same physical processor to avoid
unnecessary cross-socket traffic.
Signed-off-by: Jiang Liu <liuj97@...il.com>
---
drivers/dma/dmaengine.c | 45 +++++++++++++++++++++++++++++++++++++++++++--
1 file changed, 43 insertions(+), 2 deletions(-)
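
A note for reviewers, not intended for the commit message: below is a
minimal userspace model of the swap heuristic that dma_channel_set()
implements in this patch. The four-CPU/two-node topology, the arrays,
and the channel_set() helper name are invented purely for illustration;
the kernel code operates on per-cpu channel_table entries instead.

/*
 * Userspace sketch of the swap heuristic in dma_channel_set(): when a
 * CPU is offered a channel from a remote node, scan the other CPUs for
 * one that either holds a channel from that node or lives on that node,
 * and trade assignments so both sides end up node-local.
 */
#include <stdio.h>

#define NR_CPUS	4

static int cpu_node[NR_CPUS]  = {  0, 0, 1, 1 };  /* NUMA node of each CPU */
static int chan_node[NR_CPUS] = { -1, 0, 0, 1 };  /* node of channel each CPU holds, -1 = none */

static void channel_set(int cpu, int node)
{
	int src;

	/* Only look for a trade when the new channel is remote to this CPU. */
	if (node != -1 && node != cpu_node[cpu]) {
		for (src = 0; src < NR_CPUS; src++) {
			if (chan_node[src] < 0)	/* no channel assigned yet */
				continue;
			/*
			 * A CPU that holds a channel from 'node', or that
			 * itself lives on 'node', can trade with us so that
			 * both assignments become node-local.
			 */
			if (chan_node[src] == node || cpu_node[src] == node) {
				chan_node[cpu] = chan_node[src];
				chan_node[src] = node;
				return;
			}
		}
	}
	chan_node[cpu] = node;
}

int main(void)
{
	int cpu;

	channel_set(0, 1);	/* offer CPU0 (node 0) a channel from node 1 */
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu%d (node %d) -> channel on node %d\n",
		       cpu, cpu_node[cpu], chan_node[cpu]);
	return 0;
}

With this sample topology, CPU0 trades the remote node-1 channel to
CPU2 and takes CPU2's node-0 channel in exchange, leaving every CPU
with a node-local channel.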
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index eca45c0..8a41bdf 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -266,6 +266,7 @@ static dma_cap_mask_t dma_cap_mask_all;
 struct dma_chan_tbl_ent {
 	struct dma_chan *chan;
 	struct dma_chan *prev_chan;
+	int node;
 };
 
 /**
@@ -467,6 +468,46 @@ static void dma_channel_quiesce(void)
 #endif
 }
 
+/* Assign DMA channels to CPUs according to NUMA affinity relationship */
+static void dma_channel_set(int cap, int cpu, struct dma_chan *chan)
+{
+	int node;
+	int src_cpu;
+	struct dma_chan *src_chan;
+	struct dma_chan_tbl_ent *entry;
+	struct dma_chan_tbl_ent *src_entry;
+
+	entry = per_cpu_ptr(channel_table[cap], cpu);
+	node = dev_to_node(chan->device->dev);
+
+	/* Try to optimize if CPU and DMA channel belong to different node. */
+	if (node != -1 && node != cpu_to_node(cpu)) {
+		for_each_online_cpu(src_cpu) {
+			src_entry = per_cpu_ptr(channel_table[cap], src_cpu);
+			src_chan = src_entry->chan;
+
+			/*
+			 * CPU online map may change beneath us due to
+			 * CPU hotplug operations.
+			 */
+			if (src_chan == NULL)
+				continue;
+
+			if (src_entry->node == node ||
+			    cpu_to_node(src_cpu) == node) {
+				entry->node = src_entry->node;
+				src_entry->node = node;
+				entry->chan = src_chan;
+				src_entry->chan = chan;
+				return;
+			}
+		}
+	}
+
+	entry->node = node;
+	entry->chan = chan;
+}
+
 /**
  * dma_channel_rebalance - redistribute the available channels
  *
@@ -501,8 +542,8 @@ static void dma_channel_rebalance(bool quiesce)
 				chan = nth_chan(cap, n++);
 			else
 				chan = nth_chan(cap, -1);
-			entry = per_cpu_ptr(channel_table[cap], cpu);
-			entry->chan = chan;
+			if (chan)
+				dma_channel_set(cap, cpu, chan);
 		}
 
 	if (quiesce)
--
1.7.9.5