[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <1322836682.2762.8.camel@edumazet-laptop>
Date: Fri, 02 Dec 2011 15:38:02 +0100
From: Eric Dumazet <eric.dumazet@...il.com>
To: Sjur Brændeland
<sjur.brandeland@...ricsson.com>
Cc: netdev@...r.kernel.org, David Miller <davem@...emloft.net>,
Alexey Orishko <alexey.orishko@...ricsson.com>
Subject: Re: [PATCH 2/3] caif: Add support for flow-control on device's
tx-queue
Le vendredi 02 décembre 2011 à 15:06 +0100, Sjur Brændeland a écrit :
> Flow control is implemented by inspecting the qdisc queue length
> in order to detect potential overflow on the TX queue. When a threshold
> is reached flow-off is sent upwards in the CAIF stack. At the same time
> the skb->destructor is hijacked in order to detect when the last packet
> put on the queue is consumed. When this "hijacked" packet is consumed, flow-on
> is sent upwards in the CAIF stack.
>
> Signed-off-by: Sjur Brændeland <sjur.brandeland@...ricsson.com>
> ---
> net/caif/caif_dev.c | 48 ++++++++++++++++++++++++++++++++++++++++++++++++
> 1 files changed, 48 insertions(+), 0 deletions(-)
>
> diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
> index f7e8c70..415353e 100644
> --- a/net/caif/caif_dev.c
> +++ b/net/caif/caif_dev.c
> @@ -34,6 +34,7 @@ struct caif_device_entry {
> struct list_head list;
> struct net_device *netdev;
> int __percpu *pcpu_refcnt;
> + bool xoff;
> };
>
> struct caif_device_entry_list {
> @@ -48,6 +49,7 @@ struct caif_net {
> };
>
> static int caif_net_id;
> +static int q_high = 50; /* Percent */
>
> struct cfcnfg *get_cfcnfg(struct net *net)
> {
> @@ -126,9 +128,28 @@ static struct caif_device_entry *caif_get(struct net_device *dev)
> return NULL;
> }
>
> +void caif_flow_cb(struct sk_buff *skb)
> +{
> + struct caif_device_entry *caifd;
> + WARN_ON(skb->dev == NULL);
> +
> + rcu_read_lock();
> + caifd = caif_get(skb->dev);
> + caifd->xoff = 0;
> + caifd_hold(caifd);
> + rcu_read_unlock();
> +
> + caifd->layer.up->
> + ctrlcmd(caifd->layer.up,
> + _CAIF_CTRLCMD_PHYIF_FLOW_ON_IND,
> + caifd->layer.id);
> + caifd_put(caifd);
> +}
> +
> static int transmit(struct cflayer *layer, struct cfpkt *pkt)
> {
> int err;
> + struct caif_dev_common *caifdev;
> struct caif_device_entry *caifd =
> container_of(layer, struct caif_device_entry, layer);
> struct sk_buff *skb;
> @@ -137,6 +158,33 @@ static int transmit(struct cflayer *layer, struct cfpkt *pkt)
> skb->dev = caifd->netdev;
> skb_reset_network_header(skb);
> skb->protocol = htons(ETH_P_CAIF);
> + caifdev = netdev_priv(caifd->netdev);
> +
> + if (caifdev->flowctrl == NULL && caifd->netdev->tx_queue_len > 0 &&
> + !caifd->xoff) {
> + struct netdev_queue *txq;
> + int high;
> +
> + txq = netdev_get_tx_queue(skb->dev, 0);
Why queue 0 and not another one?
> + high = (caifd->netdev->tx_queue_len * q_high) / 100;
> +
> + /* If we run with a TX queue, check if the queue is too long*/
Are you sure only this CPU can run here? Is any lock held?
> + if (netif_queue_stopped(caifd->netdev) ||
> + qdisc_qlen(txq->qdisc) > high) {
> +
> + pr_debug("queue stop(%d) or full (%d>%d) - XOFF\n",
> + netif_queue_stopped(caifd->netdev),
> + qdisc_qlen(txq->qdisc), high);
> + caifd->xoff = 1;
> + /* Hijack this skb free callback function. */
> + skb_orphan(skb);
> + skb->destructor = caif_flow_cb;
> + caifd->layer.up->
> + ctrlcmd(caifd->layer.up,
> + _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND,
> + caifd->layer.id);
> + }
> + }
>
> err = dev_queue_xmit(skb);
> if (err > 0)
What prevents dev_queue_xmit() from orphaning the skb early?
--
To unsubscribe from this list: send the line "unsubscribe netdev" in
the body of a message to majordomo@...r.kernel.org
More majordomo info at http://vger.kernel.org/majordomo-info.html
Powered by blists - more mailing lists