lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [thread-next>] [day] [month] [year] [list]
Message-ID: <Pine.LNX.4.63.0706121647280.4120@ranjit.corp.google.com>
Date:	Tue, 12 Jun 2007 16:53:21 -0700 (PDT)
From:	Ranjit Manomohan <ranjitm@...gle.com>
To:	netdev@...r.kernel.org
cc:	ranjitm@...gle.com
Subject: [PATCH][NET_SCHED] Make HTB scheduler work with TSO.

Currently the HTB scheduler does not correctly account for TSO packets, 
which causes large inaccuracies in the bandwidth control when using TSO.
This patch allows the HTB scheduler to work with TSO enabled devices.

Signed-off-by: Ranjit Manomohan <ranjitm@...gle.com>

diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 035788c..e872724 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -153,15 +153,12 @@ #endif
  				/* of un.leaf originals should be done. */
  };

-/* TODO: maybe compute rate when size is too large .. or drop ? */
  static inline long L2T(struct htb_class *cl, struct qdisc_rate_table *rate,
  			   int size)
  {
  	int slot = size >> rate->rate.cell_log;
-	if (slot > 255) {
-		cl->xstats.giants++;
-		slot = 255;
-	}
+	if (slot > 255)
+		return (rate->data[255]*(slot >> 8) + rate->data[slot & 0xFF]);
  	return rate->data[slot];
  }

@@ -634,13 +631,14 @@ #endif
  		cl->qstats.drops++;
  		return NET_XMIT_DROP;
  	} else {
-		cl->bstats.packets++;
+		cl->bstats.packets +=
+			skb_is_gso(skb)?skb_shinfo(skb)->gso_segs:1;
  		cl->bstats.bytes += skb->len;
  		htb_activate(q, cl);
  	}

  	sch->q.qlen++;
-	sch->bstats.packets++;
+	sch->bstats.packets += skb_is_gso(skb)?skb_shinfo(skb)->gso_segs:1;
  	sch->bstats.bytes += skb->len;
  	return NET_XMIT_SUCCESS;
  }
@@ -717,8 +715,9 @@ #endif
   * In such case we remove class from event queue first.
   */
  static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
-			     int level, int bytes)
+			     int level, struct sk_buff *skb)
  {
+	int bytes = skb->len;
  	long toks, diff;
  	enum htb_cmode old_mode;

@@ -753,13 +752,15 @@ #define HTB_ACCNT(T,B,R) toks = diff + c
  #ifdef HTB_RATECM
  		/* update rate counters */
  		cl->sum_bytes += bytes;
-		cl->sum_packets++;
+		cl->sum_packets += skb_is_gso(skb)?
+				skb_shinfo(skb)->gso_segs:1;
  #endif

  		/* update byte stats except for leaves which are already updated */
  		if (cl->level) {
  			cl->bstats.bytes += bytes;
-			cl->bstats.packets++;
+			cl->bstats.packets += skb_is_gso(skb)?
+					skb_shinfo(skb)->gso_segs:1;
  		}
  		cl = cl->parent;
  	}
@@ -943,7 +944,7 @@ next:
  		   gives us slightly better performance */
  		if (!cl->un.leaf.q->q.qlen)
  			htb_deactivate(q, cl);
-		htb_charge_class(q, cl, level, skb->len);
+		htb_charge_class(q, cl, level, skb);
  	}
  	return skb;
  }

-
To unsubscribe from this list: send the line "unsubscribe netdev" in
the body of a message to majordomo@...r.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ