[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <9AAE0902D5BC7E449B7C8E4E778ABCD027D64B@AMSPEX01CL01.citrite.net>
Date: Mon, 10 Mar 2014 15:12:07 +0000
From: Paul Durrant <Paul.Durrant@...rix.com>
To: Annie Li <Annie.li@...cle.com>,
"xen-devel@...ts.xen.org" <xen-devel@...ts.xen.org>,
"netdev@...r.kernel.org" <netdev@...r.kernel.org>
CC: "konrad.wilk@...cle.com" <konrad.wilk@...cle.com>,
Ian Campbell <Ian.Campbell@...rix.com>,
Wei Liu <wei.liu2@...rix.com>
Subject: RE: [PATCH net V2] Xen-netback: Fix issue caused by using gso_type
wrongly
> -----Original Message-----
> From: Annie Li [mailto:Annie.li@...cle.com]
> Sent: 10 March 2014 14:59
> To: xen-devel@...ts.xen.org; netdev@...r.kernel.org
> Cc: konrad.wilk@...cle.com; Ian Campbell; Wei Liu; Paul Durrant;
> annie.li@...cle.com
> Subject: [PATCH net V2] Xen-netback: Fix issue caused by using gso_type
> wrongly
>
> From: Annie Li <annie.li@...cle.com>
>
> Current netback uses gso_type to check whether the skb contains
> gso offload, and this is wrong. Gso_size is the right field to
> check for gso existence; gso_type is only used to check the gso type.
>
> Some skbs contain a nonzero gso_type and a zero gso_size; current
> netback would treat these skbs as gso and create a wrong response
> for them. This also causes ssh failures to the domU from other servers.
>
> V2: use skb_is_gso function as Paul Durrant suggested
>
> Signed-off-by: Annie Li <annie.li@...cle.com>
Reviewed-by: Paul Durrant <paul.durrant@...rix.com>
> ---
> drivers/net/xen-netback/netback.c | 39 +++++++++++++++++---------------
> ----
> 1 files changed, 18 insertions(+), 21 deletions(-)
>
> diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-
> netback/netback.c
> index e5284bc..438d0c0 100644
> --- a/drivers/net/xen-netback/netback.c
> +++ b/drivers/net/xen-netback/netback.c
> @@ -240,7 +240,7 @@ static void xenvif_gop_frag_copy(struct xenvif *vif,
> struct sk_buff *skb,
> struct gnttab_copy *copy_gop;
> struct xenvif_rx_meta *meta;
> unsigned long bytes;
> - int gso_type;
> + int gso_type = XEN_NETIF_GSO_TYPE_NONE;
>
> /* Data must not cross a page boundary. */
> BUG_ON(size + offset > PAGE_SIZE<<compound_order(page));
> @@ -299,12 +299,12 @@ static void xenvif_gop_frag_copy(struct xenvif *vif,
> struct sk_buff *skb,
> }
>
> /* Leave a gap for the GSO descriptor. */
> - if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
> - gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
> - else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
> - gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
> - else
> - gso_type = XEN_NETIF_GSO_TYPE_NONE;
> + if (skb_is_gso(skb)) {
> + if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
> + gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
> + else if (skb_shinfo(skb)->gso_type &
> SKB_GSO_TCPV6)
> + gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
> + }
>
> if (*head && ((1 << gso_type) & vif->gso_mask))
> vif->rx.req_cons++;
> @@ -338,19 +338,15 @@ static int xenvif_gop_skb(struct sk_buff *skb,
> int head = 1;
> int old_meta_prod;
> int gso_type;
> - int gso_size;
>
> old_meta_prod = npo->meta_prod;
>
> - if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
> - gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
> - gso_size = skb_shinfo(skb)->gso_size;
> - } else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
> - gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
> - gso_size = skb_shinfo(skb)->gso_size;
> - } else {
> - gso_type = XEN_NETIF_GSO_TYPE_NONE;
> - gso_size = 0;
> + gso_type = XEN_NETIF_GSO_TYPE_NONE;
> + if (skb_is_gso(skb)) {
> + if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
> + gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
> + else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
> + gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
> }
>
> /* Set up a GSO prefix descriptor, if necessary */
> @@ -358,7 +354,7 @@ static int xenvif_gop_skb(struct sk_buff *skb,
> req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
> meta = npo->meta + npo->meta_prod++;
> meta->gso_type = gso_type;
> - meta->gso_size = gso_size;
> + meta->gso_size = skb_shinfo(skb)->gso_size;
> meta->size = 0;
> meta->id = req->id;
> }
> @@ -368,7 +364,7 @@ static int xenvif_gop_skb(struct sk_buff *skb,
>
> if ((1 << gso_type) & vif->gso_mask) {
> meta->gso_type = gso_type;
> - meta->gso_size = gso_size;
> + meta->gso_size = skb_shinfo(skb)->gso_size;
> } else {
> meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
> meta->gso_size = 0;
> @@ -500,8 +496,9 @@ static void xenvif_rx_action(struct xenvif *vif)
> size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
> max_slots_needed += DIV_ROUND_UP(size,
> PAGE_SIZE);
> }
> - if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 ||
> - skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
> + if (skb_is_gso(skb) &&
> + (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 ||
> + skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6))
> max_slots_needed++;
>
> /* If the skb may not fit then bail out now */
> --
> 1.7.3.4
--
To unsubscribe from this list: send the line "unsubscribe netdev" in
the body of a message to majordomo@...r.kernel.org
More majordomo info at http://vger.kernel.org/majordomo-info.html
Powered by blists - more mailing lists