[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <AANLkTikNOstJr3qqMUqtUoOm1xHOjw5uM7NpQUNJz4PR@mail.gmail.com>
Date: Tue, 3 Aug 2010 12:05:34 +0800
From: Changli Gao <xiaosuo@...il.com>
To: Krishna Kumar <krkumar2@...ibm.com>
Cc: davem@...emloft.net, arnd@...db.de, bhutchings@...arflare.com,
netdev@...r.kernel.org, therbert@...gle.com, mst@...hat.com
Subject: Re: [PATCH v3 1/2] core: Factor out flow calculation from get_rps_cpu
On Tue, Aug 3, 2010 at 11:02 AM, Krishna Kumar <krkumar2@...ibm.com> wrote:
> From: Krishna Kumar <krkumar2@...ibm.com>
>
> Factor out flow calculation code from get_rps_cpu, since macvtap
> driver can use the same code.
>
> Revisions:
>
> v2 - Ben: Separate flow calculation out and use in select queue
> v3 - Arnd: Don't re-implement MIN
>
> Signed-off-by: Krishna Kumar <krkumar2@...ibm.com>
> ---
> include/linux/netdevice.h | 1
> net/core/dev.c | 94 ++++++++++++++++++++++--------------
> 2 files changed, 59 insertions(+), 36 deletions(-)
>
> diff -ruNp org/include/linux/netdevice.h new/include/linux/netdevice.h
> --- org/include/linux/netdevice.h 2010-08-03 08:19:57.000000000 +0530
> +++ new/include/linux/netdevice.h 2010-08-03 08:19:57.000000000 +0530
> @@ -2253,6 +2253,7 @@ static inline const char *netdev_name(co
> return dev->name;
> }
>
> +extern int skb_calculate_flow(struct net_device *dev, struct sk_buff *skb);
> extern int netdev_printk(const char *level, const struct net_device *dev,
> const char *format, ...)
> __attribute__ ((format (printf, 3, 4)));
> diff -ruNp org/net/core/dev.c new/net/core/dev.c
> --- org/net/core/dev.c 2010-08-03 08:19:57.000000000 +0530
> +++ new/net/core/dev.c 2010-08-03 08:19:57.000000000 +0530
> @@ -2263,51 +2263,24 @@ static inline void ____napi_schedule(str
> __raise_softirq_irqoff(NET_RX_SOFTIRQ);
> }
>
> -#ifdef CONFIG_RPS
> -
> -/* One global table that all flow-based protocols share. */
> -struct rps_sock_flow_table *rps_sock_flow_table __read_mostly;
> -EXPORT_SYMBOL(rps_sock_flow_table);
> -
> /*
> - * get_rps_cpu is called from netif_receive_skb and returns the target
> - * CPU from the RPS map of the receiving queue for a given skb.
> - * rcu_read_lock must be held on entry.
> + * skb_calculate_flow: calculate a flow hash based on src/dst addresses
> + * and src/dst port numbers. On success, returns a hash number (> 0),
> + * otherwise -1.
> */
> -static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
> - struct rps_dev_flow **rflowp)
> +int skb_calculate_flow(struct net_device *dev, struct sk_buff *skb)
> {
> + int hash = skb->rxhash;
> struct ipv6hdr *ip6;
> struct iphdr *ip;
> - struct netdev_rx_queue *rxqueue;
> - struct rps_map *map;
> - struct rps_dev_flow_table *flow_table;
> - struct rps_sock_flow_table *sock_flow_table;
> - int cpu = -1;
> u8 ip_proto;
> - u16 tcpu;
> u32 addr1, addr2, ihl;
> union {
> u32 v32;
> u16 v16[2];
> } ports;
>
> - if (skb_rx_queue_recorded(skb)) {
> - u16 index = skb_get_rx_queue(skb);
> - if (unlikely(index >= dev->num_rx_queues)) {
> - WARN_ONCE(dev->num_rx_queues > 1, "%s received packet "
> - "on queue %u, but number of RX queues is %u\n",
> - dev->name, index, dev->num_rx_queues);
> - goto done;
> - }
> - rxqueue = dev->_rx + index;
> - } else
> - rxqueue = dev->_rx;
> -
> - if (!rxqueue->rps_map && !rxqueue->rps_flow_table)
> - goto done;
> -
> - if (skb->rxhash)
> + if (hash)
> goto got_hash; /* Skip hash computation on packet header */
>
> switch (skb->protocol) {
> @@ -2334,6 +2307,7 @@ static int get_rps_cpu(struct net_device
> default:
> goto done;
> }
> +
> switch (ip_proto) {
> case IPPROTO_TCP:
> case IPPROTO_UDP:
> @@ -2356,11 +2330,59 @@ static int get_rps_cpu(struct net_device
> /* get a consistent hash (same value on both flow directions) */
> if (addr2 < addr1)
> swap(addr1, addr2);
> - skb->rxhash = jhash_3words(addr1, addr2, ports.v32, hashrnd);
> - if (!skb->rxhash)
> - skb->rxhash = 1;
> +
> + hash = jhash_3words(addr1, addr2, ports.v32, hashrnd);
> + if (!hash)
> + hash = 1;
>
> got_hash:
> + return hash;
> +
> +done:
> + return -1;
> +}
> +EXPORT_SYMBOL(skb_calculate_flow);
I have noticed that you use skb_calculate_flow() in
macvtap_get_queue(), where skb->data doesn't point to the network
header but to the ethernet header. However, skb_calculate_flow() assumes
skb->data points to the network header. There are two choices:
* update skb_calculate_flow() to support being called in the ethernet layer.
* pull skb before skb_calculate_flow, and push skb after
skb_calculate_flow() in macvtap_get_queue().
I prefer the former way.
BTW: the function name skb_calculate_flow isn't good. How about
skb_get_rxhash()? Maybe we can implement two versions: fast path and
slow path, and implement the fast-path version as an inline function in
skbuff.h.
static inline u32 skb_get_rxhash(struct sk_buff *skb)
{
u32 rxhash;
rxhash = skb->rxhash;
if (!rxhash)
return __skb_get_rxhash(skb);
return rxhash;
}
> +
> +#ifdef CONFIG_RPS
> +
> +/* One global table that all flow-based protocols share. */
> +struct rps_sock_flow_table *rps_sock_flow_table __read_mostly;
> +EXPORT_SYMBOL(rps_sock_flow_table);
> +
> +/*
> + * get_rps_cpu is called from netif_receive_skb and returns the target
> + * CPU from the RPS map of the receiving queue for a given skb.
> + * rcu_read_lock must be held on entry.
> + */
> +static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
> + struct rps_dev_flow **rflowp)
> +{
> + struct netdev_rx_queue *rxqueue;
> + struct rps_map *map;
> + struct rps_dev_flow_table *flow_table;
> + struct rps_sock_flow_table *sock_flow_table;
> + int cpu = -1;
> + u16 tcpu;
> +
> + if (skb_rx_queue_recorded(skb)) {
> + u16 index = skb_get_rx_queue(skb);
> + if (unlikely(index >= dev->num_rx_queues)) {
> + WARN_ONCE(dev->num_rx_queues > 1, "%s received packet "
> + "on queue %u, but number of RX queues is %u\n",
> + dev->name, index, dev->num_rx_queues);
> + goto done;
> + }
> + rxqueue = dev->_rx + index;
> + } else
> + rxqueue = dev->_rx;
> +
> + if (!rxqueue->rps_map && !rxqueue->rps_flow_table)
> + goto done;
> +
> + skb->rxhash = skb_calculate_flow(dev, skb);
> + if (skb->rxhash < 0)
> + goto done;
> +
> flow_table = rcu_dereference(rxqueue->rps_flow_table);
> sock_flow_table = rcu_dereference(rps_sock_flow_table);
> if (flow_table && sock_flow_table) {
> --
> To unsubscribe from this list: send the line "unsubscribe netdev" in
> the body of a message to majordomo@...r.kernel.org
> More majordomo info at http://vger.kernel.org/majordomo-info.html
>
--
Regards,
Changli Gao(xiaosuo@...il.com)
--
To unsubscribe from this list: send the line "unsubscribe netdev" in
the body of a message to majordomo@...r.kernel.org
More majordomo info at http://vger.kernel.org/majordomo-info.html
Powered by blists - more mailing lists