[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <1420076354-4861-18-git-send-email-kan.liang@intel.com>
Date: Wed, 31 Dec 2014 20:39:06 -0500
From: kan.liang@...el.com
To: davem@...emloft.net, linux-kernel@...r.kernel.org,
netdev@...r.kernel.org
Cc: jeffrey.t.kirsher@...el.com, mingo@...hat.com,
peterz@...radead.org, kuznet@....inr.ac.ru, jmorris@...ei.org,
yoshfuji@...ux-ipv6.org, kaber@...sh.net,
akpm@...ux-foundation.org, keescook@...omium.org,
viro@...iv.linux.org.uk, gorcunov@...nvz.org,
john.stultz@...aro.org, aduyck@...antis.com, ben@...adent.org.uk,
decot@...glers.com, fw@...len.de, alexander.duyck@...il.com,
daniel@...earbox.net, tom@...bertland.com, rdunlap@...radead.org,
xiyou.wangcong@...il.com, hannes@...essinduktion.org,
jesse.brandeburg@...el.com, andi@...stfloor.org,
Kan Liang <kan.liang@...el.com>
Subject: [RFC V2 PATCH 17/25] net/netpolicy: introduce netpolicy_pick_queue
From: Kan Liang <kan.liang@...el.com>
To achieve better network performance, the key step is to distribute the
packets to dedicated queues according to policy and system run time
status.
This patch provides an interface which can return the proper dedicated
queue for socket/task. Then the packets of the socket/task will be
redirected to the dedicated queue for better network performance.
For selecting the proper queue, currently it picks the least-referenced
available object from the given policy object list, which approximates
round-robin distribution. The algorithm is good enough for now. But it
could be improved by some adaptive algorithm later.
The selected object will be stored in hashtable. So it does not need to
go through the whole object list every time.
Signed-off-by: Kan Liang <kan.liang@...el.com>
---
include/linux/netpolicy.h | 5 ++
net/core/netpolicy.c | 136 ++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 141 insertions(+)
diff --git a/include/linux/netpolicy.h b/include/linux/netpolicy.h
index 5900252..a522015 100644
--- a/include/linux/netpolicy.h
+++ b/include/linux/netpolicy.h
@@ -97,6 +97,7 @@ extern void update_netpolicy_sys_map(void);
extern int netpolicy_register(struct netpolicy_instance *instance,
enum netpolicy_name policy);
extern void netpolicy_unregister(struct netpolicy_instance *instance);
+extern int netpolicy_pick_queue(struct netpolicy_instance *instance, bool is_rx);
#else
static inline void update_netpolicy_sys_map(void)
{
@@ -111,6 +112,10 @@ static inline void netpolicy_unregister(struct netpolicy_instance *instance)
{
}
+static inline int netpolicy_pick_queue(struct netpolicy_instance *instance, bool is_rx)
+{
+ return 0;
+}
#endif
#endif /*__LINUX_NETPOLICY_H*/
diff --git a/net/core/netpolicy.c b/net/core/netpolicy.c
index 3605761..98ca430 100644
--- a/net/core/netpolicy.c
+++ b/net/core/netpolicy.c
@@ -290,6 +290,142 @@ static void netpolicy_record_clear_dev_node(struct net_device *dev)
spin_unlock_bh(&np_hashtable_lock);
}
+/* get_avail_object - pick the least-referenced object for @policy
+ *
+ * Walks @dev's object list for the requested direction and policy and
+ * returns the object with the smallest refcnt, bumping its refcnt under
+ * np_ob_list_lock.  Returns NULL when the device has no netpolicy or no
+ * queue supports the requested policy.
+ */
+static struct netpolicy_object *get_avail_object(struct net_device *dev,
+						 enum netpolicy_name policy,
+						 bool is_rx)
+{
+	int dir = is_rx ? NETPOLICY_RX : NETPOLICY_TX;
+	struct netpolicy_object *tmp, *obj = NULL;
+	int val = -1;
+
+	/* Check if net policy is supported */
+	if (!dev || !dev->netpolicy)
+		return NULL;
+
+	/* The system should have queues which support the request policy. */
+	if ((policy != dev->netpolicy->cur_policy) &&
+	    (dev->netpolicy->cur_policy != NET_POLICY_MIX))
+		return NULL;
+
+	spin_lock_bh(&dev->np_ob_list_lock);
+	list_for_each_entry(tmp, &dev->netpolicy->obj_list[dir][policy], list) {
+		/* Read refcnt once so the compared and the recorded value
+		 * cannot diverge under concurrent refcnt updates.
+		 */
+		int refcnt = atomic_read(&tmp->refcnt);
+
+		if ((val == -1) || (refcnt < val)) {
+			val = refcnt;
+			obj = tmp;
+		}
+	}
+
+	if (WARN_ON(!obj)) {
+		spin_unlock_bh(&dev->np_ob_list_lock);
+		return NULL;
+	}
+	atomic_inc(&obj->refcnt);
+	spin_unlock_bh(&dev->np_ob_list_lock);
+
+	return obj;
+}
+
+/* get_avail_queue - return the queue recorded for @instance, assigning
+ * one on first use
+ *
+ * Looks up the instance's record in the hashtable.  If an object is
+ * already cached for the requested direction its queue is returned;
+ * otherwise a new record is duplicated from the old one, an object is
+ * picked from the policy list, and the hashtable entry is replaced.
+ *
+ * Return: the queue number, or negative on failure.
+ */
+static int get_avail_queue(struct netpolicy_instance *instance, bool is_rx)
+{
+	struct netpolicy_record *old_record, *new_record;
+	struct net_device *dev = instance->dev;
+	unsigned long ptr_id = (uintptr_t)instance->ptr;
+	struct netpolicy_object *obj;
+	int queue = -1;
+
+	spin_lock_bh(&np_hashtable_lock);
+	old_record = netpolicy_record_search(ptr_id);
+	if (!old_record) {
+		pr_warn("NETPOLICY: not registered. Remove net policy settings!\n");
+		instance->policy = NET_POLICY_INVALID;
+		goto err;
+	}
+
+	if (is_rx && old_record->rx_obj) {
+		queue = old_record->rx_obj->queue;
+	} else if (!is_rx && old_record->tx_obj) {
+		queue = old_record->tx_obj->queue;
+	} else {
+		/* np_hashtable_lock is held (spin_lock_bh above), so the
+		 * allocation must not sleep: GFP_ATOMIC, not GFP_KERNEL.
+		 */
+		new_record = kmemdup(old_record, sizeof(*new_record),
+				     GFP_ATOMIC);
+		if (!new_record)
+			goto err;
+
+		obj = get_avail_object(dev, new_record->policy, is_rx);
+		if (!obj) {
+			kfree(new_record);
+			goto err;
+		}
+		if (!new_record->dev)
+			new_record->dev = dev;
+		if (is_rx)
+			new_record->rx_obj = obj;
+		else
+			new_record->tx_obj = obj;
+		queue = obj->queue;
+
+		/* update record
+		 * NOTE(review): if any reader traverses this hashtable under
+		 * RCU without taking np_hashtable_lock, old_record must be
+		 * freed with kfree_rcu() instead of kfree() -- confirm the
+		 * reader-side locking.
+		 */
+		hlist_replace_rcu(&old_record->hash_node, &new_record->hash_node);
+		kfree(old_record);
+	}
+err:
+	spin_unlock_bh(&np_hashtable_lock);
+	return queue;
+}
+
+/* policy_validate - check that the instance's requested policy can be
+ * served by the device's currently active policy
+ *
+ * Return: true when compatible; false (with a warning for genuine
+ * mismatches) otherwise.  Caller must ensure dev->netpolicy is valid.
+ */
+static inline bool policy_validate(struct netpolicy_instance *instance)
+{
+	struct net_device *dev = instance->dev;
+	enum netpolicy_name cur_policy = dev->netpolicy->cur_policy;
+	bool supported;
+
+	/* No policy configured on either side: nothing to pick from. */
+	if ((cur_policy == NET_POLICY_NONE) ||
+	    (instance->policy == NET_POLICY_NONE))
+		return false;
+
+	if (cur_policy == NET_POLICY_MIX)
+		supported = (instance->policy != NET_POLICY_CPU);
+	else
+		supported = (instance->policy == cur_policy);
+
+	if (!supported)
+		pr_warn("NETPOLICY: %s current device policy %s doesn't support required policy %s! Remove net policy settings!\n",
+			dev->name, policy_name[cur_policy],
+			policy_name[instance->policy]);
+	return supported;
+}
+
+/**
+ * netpolicy_pick_queue() - Find proper queue
+ * @instance: NET policy per socket/task instance info
+ * @is_rx: RX queue (true) or TX queue (false)
+ *
+ * This function intends to find the proper queue according to policy.
+ * It currently selects the least-referenced object from the given
+ * policy object list (see get_avail_object()) rather than a strict
+ * round-robin; the selection could be replaced by an adaptive
+ * algorithm later.
+ * The selected object is cached in a hashtable keyed on the
+ * socket/task pointer, so repeated calls do not walk the whole
+ * object list.
+ *
+ * Return: negative on failure, otherwise on the assigned queue
+ */
+int netpolicy_pick_queue(struct netpolicy_instance *instance, bool is_rx)
+{
+	struct net_device *dev = instance->dev;
+
+	/* Queue mapping only exists on devices with netpolicy enabled. */
+	if (!dev || !dev->netpolicy)
+		return -EINVAL;
+
+	if (!policy_validate(instance))
+		return -EINVAL;
+
+	return get_avail_queue(instance, is_rx);
+}
+EXPORT_SYMBOL(netpolicy_pick_queue);
+
/**
* netpolicy_register() - Register per socket/task policy request
* @instance: NET policy per socket/task instance info
--
2.5.5
Powered by blists - more mailing lists