Message-Id: <1473692159-4017-6-git-send-email-kan.liang@intel.com>
Date:   Mon, 12 Sep 2016 07:55:38 -0700
From:   kan.liang@...el.com
To:     davem@...emloft.net, linux-kernel@...r.kernel.org,
        netdev@...r.kernel.org
Cc:     jeffrey.t.kirsher@...el.com, mingo@...hat.com,
        peterz@...radead.org, kuznet@....inr.ac.ru, jmorris@...ei.org,
        yoshfuji@...ux-ipv6.org, kaber@...sh.net,
        akpm@...ux-foundation.org, keescook@...omium.org,
        viro@...iv.linux.org.uk, gorcunov@...nvz.org,
        john.stultz@...aro.org, aduyck@...antis.com, ben@...adent.org.uk,
        decot@...glers.com, fw@...len.de, alexander.duyck@...il.com,
        daniel@...earbox.net, tom@...bertland.com, rdunlap@...radead.org,
        xiyou.wangcong@...il.com, hannes@...essinduktion.org,
        stephen@...workplumber.org, alexei.starovoitov@...il.com,
        jesse.brandeburg@...el.com, andi@...stfloor.org,
        Kan Liang <kan.liang@...el.com>
Subject: [RFC V3 PATCH 05/26] net/netpolicy: create CPU and queue mapping

From: Kan Liang <kan.liang@...el.com>

The current implementation enforces a 1:1 mapping between CPUs and queues. This
patch introduces the function netpolicy_update_sys_map() to build that mapping.
The result is stored in netpolicy_sys_info.

If the CPU count and the queue count differ, only the smaller of the two is
mapped; the remaining CPUs or queues are left unused for now.

CPU hotplug, device hotplug, or ethtool may change the CPU count or the queue
count. In those cases the same function can be called again to rebuild the
mapping. The hooks for these cases are added later in this series.
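
For illustration only (not part of this patch), below is a minimal stand-alone
user-space sketch of the pairing rule: the i-th online CPU is paired with queue
i, stopping at the smaller of the two counts. The CPU and queue numbers are
made up for the example; the real code walks cpu_online_mask and the
per-device IRQ arrays instead.

#include <stdio.h>

struct sys_map_entry {
	unsigned int cpu;
	unsigned int queue;
};

int main(void)
{
	/* Hypothetical numbers: 8 online CPUs, but only 4 RX queues. */
	unsigned int online_cpus[] = { 0, 1, 2, 3, 4, 5, 6, 7 };
	unsigned int cpu_num = 8, rx_queue_num = 4;
	/* Same rule as the patch: build min(cpu_num, rx_queue_num) pairs. */
	unsigned int num = cpu_num < rx_queue_num ? cpu_num : rx_queue_num;
	struct sys_map_entry rx[8];
	unsigned int i;

	for (i = 0; i < num; i++) {
		rx[i].cpu = online_cpus[i];	/* i-th online CPU ...        */
		rx[i].queue = i;		/* ... paired 1:1 with queue i */
		printf("rx[%u]: cpu %u <-> queue %u\n",
		       i, rx[i].cpu, rx[i].queue);
	}
	/* CPUs 4-7 are left unmapped, as described above. */
	return 0;
}

Compiled and run, this prints four pairs (cpu 0..3 <-> queue 0..3); CPUs 4-7
stay unused, which matches the behaviour described above.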

Signed-off-by: Kan Liang <kan.liang@...el.com>
---
 include/linux/netpolicy.h | 18 ++++++++++++
 net/core/netpolicy.c      | 74 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 92 insertions(+)

diff --git a/include/linux/netpolicy.h b/include/linux/netpolicy.h
index fc87d9b..a946b75c 100644
--- a/include/linux/netpolicy.h
+++ b/include/linux/netpolicy.h
@@ -30,9 +30,27 @@ struct netpolicy_dev_info {
 	u32	*tx_irq;
 };
 
+struct netpolicy_sys_map {
+	u32	cpu;
+	u32	queue;
+	u32	irq;
+};
+
+struct netpolicy_sys_info {
+	/*
+	 * Record the cpu and queue 1:1 mapping
+	 */
+	u32				avail_rx_num;
+	struct netpolicy_sys_map	*rx;
+	u32				avail_tx_num;
+	struct netpolicy_sys_map	*tx;
+};
+
 struct netpolicy_info {
 	enum netpolicy_name	cur_policy;
 	unsigned long avail_policy[BITS_TO_LONGS(NET_POLICY_MAX)];
+	/* cpu and queue mapping information */
+	struct netpolicy_sys_info	sys_info;
 };
 
 #endif /*__LINUX_NETPOLICY_H*/
diff --git a/net/core/netpolicy.c b/net/core/netpolicy.c
index 31c41ca..0972341 100644
--- a/net/core/netpolicy.c
+++ b/net/core/netpolicy.c
@@ -55,6 +55,80 @@ static u32 netpolicy_get_cpu_information(void)
 	return num_online_cpus();
 }
 
+static void netpolicy_free_sys_map(struct net_device *dev)
+{
+	struct netpolicy_sys_info *s_info = &dev->netpolicy->sys_info;
+
+	kfree(s_info->rx);
+	s_info->rx = NULL;
+	s_info->avail_rx_num = 0;
+	kfree(s_info->tx);
+	s_info->tx = NULL;
+	s_info->avail_tx_num = 0;
+}
+
+static int netpolicy_update_sys_map(struct net_device *dev,
+				    struct netpolicy_dev_info *d_info,
+				    u32 cpu)
+{
+	struct netpolicy_sys_info *s_info = &dev->netpolicy->sys_info;
+	u32 num, i, online_cpu;
+	cpumask_var_t cpumask;
+
+	if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC))
+		return -ENOMEM;
+
+	/* update rx cpu map */
+	if (cpu > d_info->rx_num)
+		num = d_info->rx_num;
+	else
+		num = cpu;
+
+	s_info->avail_rx_num = num;
+	s_info->rx = kcalloc(num, sizeof(*s_info->rx), GFP_ATOMIC);
+	if (!s_info->rx)
+		goto err;
+	cpumask_copy(cpumask, cpu_online_mask);
+
+	i = 0;
+	for_each_cpu(online_cpu, cpumask) {
+		if (i == num)
+			break;
+		s_info->rx[i].cpu = online_cpu;
+		s_info->rx[i].queue = i;
+		s_info->rx[i].irq = d_info->rx_irq[i];
+		i++;
+	}
+
+	/* update tx cpu map */
+	if (cpu > d_info->tx_num)
+		num = d_info->tx_num;
+	else
+		num = cpu;
+
+	s_info->avail_tx_num = num;
+	s_info->tx = kcalloc(num, sizeof(*s_info->tx), GFP_ATOMIC);
+	if (!s_info->tx)
+		goto err;
+
+	i = 0;
+	for_each_cpu(online_cpu, cpumask) {
+		if (i == num)
+			break;
+		s_info->tx[i].cpu = online_cpu;
+		s_info->tx[i].queue = i;
+		s_info->tx[i].irq = d_info->tx_irq[i];
+		i++;
+	}
+
+	free_cpumask_var(cpumask);
+	return 0;
+err:
+	netpolicy_free_sys_map(dev);
+	free_cpumask_var(cpumask);
+	return -ENOMEM;
+}
+
 const char *policy_name[NET_POLICY_MAX] = {
 	"NONE"
 };
-- 
2.5.5
