Date: Sat, 2 May 2020 19:54:21 -0700
From: Eric Dumazet <edumazet@...gle.com>
To: "David S . Miller" <davem@...emloft.net>
Cc: netdev <netdev@...r.kernel.org>, Eric Dumazet <edumazet@...gle.com>,
	Eric Dumazet <eric.dumazet@...il.com>
Subject: [PATCH net-next 4/5] net_sched: sch_fq: do not call fq_peek() twice per packet

This refactors the code to not call fq_peek() from fq_dequeue_head(),
since the caller can provide the skb.

Also rename fq_dequeue_head() to fq_dequeue_skb(), because 'head' is a
bit vague given the skb could come from the t_root rb-tree.

Signed-off-by: Eric Dumazet <edumazet@...gle.com>
---
 net/sched/sch_fq.c | 34 ++++++++++++++++------------------
 1 file changed, 16 insertions(+), 18 deletions(-)

diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index 56e4f3c4380c517136b22862771f9899a7fd99f2..4a28f611edf0cd4ac7fb53fc1c2a4ba12060bf59 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -388,19 +388,17 @@ static void fq_erase_head(struct Qdisc *sch, struct fq_flow *flow,
 	}
 }
 
-/* remove one skb from head of flow queue */
-static struct sk_buff *fq_dequeue_head(struct Qdisc *sch, struct fq_flow *flow)
+/* Remove one skb from flow queue.
+ * This skb must be the return value of prior fq_peek().
+ */
+static void fq_dequeue_skb(struct Qdisc *sch, struct fq_flow *flow,
+			   struct sk_buff *skb)
 {
-	struct sk_buff *skb = fq_peek(flow);
-
-	if (skb) {
-		fq_erase_head(sch, flow, skb);
-		skb_mark_not_on_list(skb);
-		flow->qlen--;
-		qdisc_qstats_backlog_dec(sch, skb);
-		sch->q.qlen--;
-	}
-	return skb;
+	fq_erase_head(sch, flow, skb);
+	skb_mark_not_on_list(skb);
+	flow->qlen--;
+	qdisc_qstats_backlog_dec(sch, skb);
+	sch->q.qlen--;
 }
 
 static void flow_queue_add(struct fq_flow *flow, struct sk_buff *skb)
@@ -538,9 +536,11 @@ static struct sk_buff *fq_dequeue(struct Qdisc *sch)
 	if (!sch->q.qlen)
 		return NULL;
 
-	skb = fq_dequeue_head(sch, &q->internal);
-	if (skb)
+	skb = fq_peek(&q->internal);
+	if (unlikely(skb)) {
+		fq_dequeue_skb(sch, &q->internal, skb);
 		goto out;
+	}
 
 	q->ktime_cache = now = ktime_get_ns();
 	fq_check_throttled(q, now);
@@ -580,10 +580,8 @@ static struct sk_buff *fq_dequeue(struct Qdisc *sch)
 			INET_ECN_set_ce(skb);
 			q->stat_ce_mark++;
 		}
-	}
-
-	skb = fq_dequeue_head(sch, f);
-	if (!skb) {
+		fq_dequeue_skb(sch, f, skb);
+	} else {
 		head->first = f->next;
 		/* force a pass through old_flows to prevent starvation */
 		if ((head == &q->new_flows) && q->old_flows.first) {
-- 
2.26.2.526.g744177e7f7-goog
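
For readers less familiar with the qdisc code, below is a minimal userspace
sketch (not kernel code) of the pattern the patch moves to: peek at the head
of the queue once, inspect it, and hand that same pointer to the dequeue
helper so nothing is looked up a second time. The types and helper names
(struct pkt, struct flow, pkt_peek, pkt_dequeue) are illustrative only and
do not exist in the kernel.

#include <stdio.h>
#include <stddef.h>

struct pkt {
	long long time_to_send;
	struct pkt *next;
};

struct flow {
	struct pkt *head;
	int qlen;
};

/* Return the head packet without removing it; NULL if the flow is empty. */
static struct pkt *pkt_peek(const struct flow *f)
{
	return f->head;
}

/* Remove a packet the caller already obtained from pkt_peek(), mirroring
 * how fq_dequeue_skb() now trusts the skb its caller passes in. */
static void pkt_dequeue(struct flow *f, struct pkt *p)
{
	f->head = p->next;
	p->next = NULL;
	f->qlen--;
}

int main(void)
{
	struct pkt a = { .time_to_send = 5, .next = NULL };
	struct flow f = { .head = &a, .qlen = 1 };
	long long now = 10;
	struct pkt *p = pkt_peek(&f);	/* single peek ... */

	if (p && p->time_to_send <= now) {
		pkt_dequeue(&f, p);	/* ... pointer reused, no second lookup */
		printf("dequeued, qlen=%d\n", f.qlen);
	}
	return 0;
}

The point of the change is the same as in this sketch: fq_dequeue() already
has to peek the skb to check its send time (and the CE threshold), so passing
that skb to the dequeue helper avoids calling fq_peek() a second time per
packet.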