Message-ID: <OF46AA822C.08F8EAED-ON65257774.001F5AE3-65257774.00209BBF@in.ibm.com>
Date:	Tue, 3 Aug 2010 11:27:44 +0530
From:	Krishna Kumar2 <krkumar2@...ibm.com>
To:	Changli Gao <xiaosuo@...il.com>
Cc:	arnd@...db.de, bhutchings@...arflare.com, davem@...emloft.net,
	mst@...hat.com, netdev@...r.kernel.org, therbert@...gle.com
Subject: Re: [PATCH v3 1/2] core: Factor out flow calculation from get_rps_cpu

Hi Changli,

Good catch.

Instead of teaching the flow calculation about the ethernet header, or
doing a pull/push around it, I could defer the skb_push(ETH_HLEN), something like:

static int macvtap_forward(struct net_device *dev, struct sk_buff *skb)
{
	struct macvtap_queue *q = macvtap_get_queue(dev, skb);
	if (!q)
		goto drop;

	if (skb_queue_len(&q->sk.sk_receive_queue) >= dev->tx_queue_len)
		goto drop;

+	skb_push(skb, ETH_HLEN);
	...
}

and remove the corresponding call in macvtap_receive. Would this be better?
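
For reference, the receive side would then be just (a rough sketch, assuming
macvtap_receive currently only pushes the mac header back on and forwards):

static int macvtap_receive(struct sk_buff *skb)
{
-	skb_push(skb, ETH_HLEN);
	return macvtap_forward(skb->dev, skb);
}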

Your other suggestions also look good.
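
On the skb_get_rxhash() split, the slow path could simply reuse the existing
calculation and cache the result in the skb, roughly like this (a sketch only;
the name and the use of skb->dev are illustrative, not final):

u32 __skb_get_rxhash(struct sk_buff *skb)
{
	int hash = skb_calculate_flow(skb->dev, skb);

	/* cache the hash so the inline fast path can return it next time */
	skb->rxhash = (hash > 0) ? hash : 0;

	return skb->rxhash;
}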

Thanks,

- KK

Changli Gao <xiaosuo@...il.com> wrote on 08/03/2010 09:35:34 AM:

> On Tue, Aug 3, 2010 at 11:02 AM, Krishna Kumar <krkumar2@...ibm.com> wrote:
> > From: Krishna Kumar <krkumar2@...ibm.com>
> >
> > Factor out flow calculation code from get_rps_cpu, since macvtap
> > driver can use the same code.
> >
> > Revisions:
> >
> > v2 - Ben: Separate flow calculation out and use in select queue
> > v3 - Arnd: Don't re-implement MIN
> >
> > Signed-off-by: Krishna Kumar <krkumar2@...ibm.com>
> > ---
> >  include/linux/netdevice.h |    1
> >  net/core/dev.c            |   94 ++++++++++++++++++++++--------------
> >  2 files changed, 59 insertions(+), 36 deletions(-)
> >
> > diff -ruNp org/include/linux/netdevice.h new/include/linux/netdevice.h
> > --- org/include/linux/netdevice.h       2010-08-03 08:19:57.000000000 +0530
> > +++ new/include/linux/netdevice.h       2010-08-03 08:19:57.000000000 +0530
> > @@ -2253,6 +2253,7 @@ static inline const char *netdev_name(co
> >        return dev->name;
> >  }
> >
> > +extern int skb_calculate_flow(struct net_device *dev, struct sk_buff *skb);
> >  extern int netdev_printk(const char *level, const struct net_device *dev,
> >                         const char *format, ...)
> >        __attribute__ ((format (printf, 3, 4)));
> > diff -ruNp org/net/core/dev.c new/net/core/dev.c
> > --- org/net/core/dev.c  2010-08-03 08:19:57.000000000 +0530
> > +++ new/net/core/dev.c  2010-08-03 08:19:57.000000000 +0530
> > @@ -2263,51 +2263,24 @@ static inline void ____napi_schedule(str
> >        __raise_softirq_irqoff(NET_RX_SOFTIRQ);
> >  }
> >
> > -#ifdef CONFIG_RPS
> > -
> > -/* One global table that all flow-based protocols share. */
> > -struct rps_sock_flow_table *rps_sock_flow_table __read_mostly;
> > -EXPORT_SYMBOL(rps_sock_flow_table);
> > -
> >  /*
> > - * get_rps_cpu is called from netif_receive_skb and returns the target
> > - * CPU from the RPS map of the receiving queue for a given skb.
> > - * rcu_read_lock must be held on entry.
> > + * skb_calculate_flow: calculate a flow hash based on src/dst addresses
> > + * and src/dst port numbers. On success, returns a hash number (> 0),
> > + * otherwise -1.
> >  */
> > -static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
> > -                      struct rps_dev_flow **rflowp)
> > +int skb_calculate_flow(struct net_device *dev, struct sk_buff *skb)
> >  {
> > +       int hash = skb->rxhash;
> >        struct ipv6hdr *ip6;
> >        struct iphdr *ip;
> > -       struct netdev_rx_queue *rxqueue;
> > -       struct rps_map *map;
> > -       struct rps_dev_flow_table *flow_table;
> > -       struct rps_sock_flow_table *sock_flow_table;
> > -       int cpu = -1;
> >        u8 ip_proto;
> > -       u16 tcpu;
> >        u32 addr1, addr2, ihl;
> >        union {
> >                u32 v32;
> >                u16 v16[2];
> >        } ports;
> >
> > -       if (skb_rx_queue_recorded(skb)) {
> > -               u16 index = skb_get_rx_queue(skb);
> > -               if (unlikely(index >= dev->num_rx_queues)) {
> > -                       WARN_ONCE(dev->num_rx_queues > 1, "%s received packet "
> > -                               "on queue %u, but number of RX queues is %u\n",
> > -                               dev->name, index, dev->num_rx_queues);
> > -                       goto done;
> > -               }
> > -               rxqueue = dev->_rx + index;
> > -       } else
> > -               rxqueue = dev->_rx;
> > -
> > -       if (!rxqueue->rps_map && !rxqueue->rps_flow_table)
> > -               goto done;
> > -
> > -       if (skb->rxhash)
> > +       if (hash)
> >                goto got_hash; /* Skip hash computation on packet header */
> >
> >        switch (skb->protocol) {
> > @@ -2334,6 +2307,7 @@ static int get_rps_cpu(struct net_device
> >        default:
> >                goto done;
> >        }
> > +
> >        switch (ip_proto) {
> >        case IPPROTO_TCP:
> >        case IPPROTO_UDP:
> > @@ -2356,11 +2330,59 @@ static int get_rps_cpu(struct net_device
> >        /* get a consistent hash (same value on both flow directions) */
> >        if (addr2 < addr1)
> >                swap(addr1, addr2);
> > -       skb->rxhash = jhash_3words(addr1, addr2, ports.v32, hashrnd);
> > -       if (!skb->rxhash)
> > -               skb->rxhash = 1;
> > +
> > +       hash = jhash_3words(addr1, addr2, ports.v32, hashrnd);
> > +       if (!hash)
> > +               hash = 1;
> >
> >  got_hash:
> > +       return hash;
> > +
> > +done:
> > +       return -1;
> > +}
> > +EXPORT_SYMBOL(skb_calculate_flow);
>
> I have noticed that you use skb_calculate_flow() in
> macvtap_get_queue(), where skb->data doesn't point to the network
> header but to the ethernet header. However, skb_calculate_flow()
> assumes skb->data points to the network header. There are two choices:
>  * update skb_calculate_flow() to support being called at the ethernet layer.
>  * pull the skb before skb_calculate_flow(), and push it back after
> skb_calculate_flow() in macvtap_get_queue().
>
> I prefer the former way.
>
> BTW: the function name skb_calculate_flow isn't good. How about
> skb_get_rxhash()? Maybe we can implement two versions, a fast path and
> a slow path, and implement the fast path version as an inline function
> in skbuff.h.
>
> static inline u32 skb_get_rxhash(struct sk_buff *skb)
> {
>         u32 rxhash;
>
>         rxhash = skb->rxhash;
>         if (!rxhash)
>                 return __skb_get_rxhash(skb);
>         return rxhash;
> }
>
>
> > +
> > +#ifdef CONFIG_RPS
> > +
> > +/* One global table that all flow-based protocols share. */
> > +struct rps_sock_flow_table *rps_sock_flow_table __read_mostly;
> > +EXPORT_SYMBOL(rps_sock_flow_table);
> > +
> > +/*
> > + * get_rps_cpu is called from netif_receive_skb and returns the target
> > + * CPU from the RPS map of the receiving queue for a given skb.
> > + * rcu_read_lock must be held on entry.
> > + */
> > +static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
> > +                      struct rps_dev_flow **rflowp)
> > +{
> > +       struct netdev_rx_queue *rxqueue;
> > +       struct rps_map *map;
> > +       struct rps_dev_flow_table *flow_table;
> > +       struct rps_sock_flow_table *sock_flow_table;
> > +       int cpu = -1;
> > +       u16 tcpu;
> > +
> > +       if (skb_rx_queue_recorded(skb)) {
> > +               u16 index = skb_get_rx_queue(skb);
> > +               if (unlikely(index >= dev->num_rx_queues)) {
> > +                       WARN_ONCE(dev->num_rx_queues > 1, "%s received packet "
> > +                               "on queue %u, but number of RX queues is %u\n",
> > +                               dev->name, index, dev->num_rx_queues);
> > +                       goto done;
> > +               }
> > +               rxqueue = dev->_rx + index;
> > +       } else
> > +               rxqueue = dev->_rx;
> > +
> > +       if (!rxqueue->rps_map && !rxqueue->rps_flow_table)
> > +               goto done;
> > +
> > +       skb->rxhash = skb_calculate_flow(dev, skb);
> > +       if (skb->rxhash < 0)
> > +               goto done;
> > +
> >        flow_table = rcu_dereference(rxqueue->rps_flow_table);
> >        sock_flow_table = rcu_dereference(rps_sock_flow_table);
> >        if (flow_table && sock_flow_table) {
> > --
>
>
>
> --
> Regards,
> Changli Gao(xiaosuo@...il.com)

