Message-ID: <b8fffe6f-e4c8-1412-1194-0ed65c27989f@pensando.io>
Date: Mon, 1 Jul 2019 11:17:08 -0700
From: Shannon Nelson <snelson@...sando.io>
To: Jakub Kicinski <jakub.kicinski@...ronome.com>
Cc: netdev@...r.kernel.org
Subject: Re: [PATCH v2 net-next 14/19] ionic: Add Tx and Rx handling
On 6/29/19 11:57 AM, Jakub Kicinski wrote:
> On Fri, 28 Jun 2019 14:39:29 -0700, Shannon Nelson wrote:
>> +static int ionic_tx(struct queue *q, struct sk_buff *skb)
>> +{
>> + struct tx_stats *stats = q_to_tx_stats(q);
>> + int err;
>> +
>> + if (skb->ip_summed == CHECKSUM_PARTIAL)
>> + err = ionic_tx_calc_csum(q, skb);
>> + else
>> + err = ionic_tx_calc_no_csum(q, skb);
>> + if (err)
>> + return err;
>> +
>> + err = ionic_tx_skb_frags(q, skb);
>> + if (err)
>> + return err;
>> +
>> + skb_tx_timestamp(skb);
>> + stats->pkts++;
>> + stats->bytes += skb->len;
> Presumably this is 64bit so you should use
> u64_stats_update_begin()
> u64_stats_update_end()
> around it (and all other stats).
Since the device won't work on a 32-bit arch and I have the Kconfig
set to depend on 64BIT, I wasn't sure I needed to bother with the extra
syntactic sugar.
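For reference, the pattern being suggested is the u64_stats_sync idiom.
A minimal sketch, assuming a syncp member in tx_stats (the member and
the local variable names are illustrative, not from this patch); with
the Kconfig depending on 64BIT, struct u64_stats_sync is empty and the
begin/end calls compile away:

    #include <linux/u64_stats_sync.h>

    struct tx_stats {
            struct u64_stats_sync syncp;   /* assumed member, not in the patch */
            u64 pkts;
            u64 bytes;
    };

    /* writer side, in the transmit path */
    u64_stats_update_begin(&stats->syncp);
    stats->pkts++;
    stats->bytes += skb->len;
    u64_stats_update_end(&stats->syncp);

    /* reader side, e.g. in .ndo_get_stats64 */
    unsigned int start;
    u64 pkts, bytes;

    do {
            start = u64_stats_fetch_begin(&stats->syncp);
            pkts = stats->pkts;
            bytes = stats->bytes;
    } while (u64_stats_fetch_retry(&stats->syncp, start));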
>> +
>> + ionic_txq_post(q, !netdev_xmit_more(), ionic_tx_clean, skb);
>> +
>> + return 0;
>> +}
>> +
>> +static int ionic_tx_descs_needed(struct queue *q, struct sk_buff *skb)
>> +{
>> + struct tx_stats *stats = q_to_tx_stats(q);
>> + int err;
>> +
>> + /* If TSO, need roundup(skb->len/mss) descs */
>> + if (skb_is_gso(skb))
>> + return (skb->len / skb_shinfo(skb)->gso_size) + 1;
>> +
>> + /* If non-TSO, just need 1 desc and nr_frags sg elems */
>> + if (skb_shinfo(skb)->nr_frags <= IONIC_TX_MAX_SG_ELEMS)
>> + return 1;
>> +
>> + /* Too many frags, so linearize */
>> + err = skb_linearize(skb);
>> + if (err)
>> + return err;
>> +
>> + stats->linearize++;
>> +
>> + /* Need 1 desc and zero sg elems */
>> + return 1;
>> +}
>> +
>> +netdev_tx_t ionic_start_xmit(struct sk_buff *skb, struct net_device *netdev)
>> +{
>> + u16 queue_index = skb_get_queue_mapping(skb);
>> + struct lif *lif = netdev_priv(netdev);
>> + struct queue *q;
>> + int ndescs;
>> + int err;
>> +
>> + if (unlikely(!test_bit(LIF_UP, lif->state))) {
>> + dev_kfree_skb(skb);
>> + return NETDEV_TX_OK;
>> + }
>> +
>> + if (likely(lif_to_txqcq(lif, queue_index)))
>> + q = lif_to_txq(lif, queue_index);
>> + else
>> + q = lif_to_txq(lif, 0);
>> +
>> + ndescs = ionic_tx_descs_needed(q, skb);
>> + if (ndescs < 0)
>> + goto err_out_drop;
>> +
>> + if (!ionic_q_has_space(q, ndescs)) {
>> + netif_stop_subqueue(netdev, queue_index);
>> + q->stop++;
>> +
>> + /* Might race with ionic_tx_clean, check again */
>> + smp_rmb();
>> + if (ionic_q_has_space(q, ndescs)) {
>> + netif_wake_subqueue(netdev, queue_index);
>> + q->wake++;
>> + } else {
>> + return NETDEV_TX_BUSY;
> This should never really happen..
Couldn't we have an Rx interrupt and a call to ionic_tx_clean() in the
middle of this?
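For what it's worth, the usual way that race is closed is a paired
barrier and re-check on the wake side. A sketch of the common
stop/wake idiom, not the ionic code; qi and the MAX_SKB_FRAGS + 1
budget are placeholders:

    /* completion side (e.g. ionic_tx_clean), after freeing descriptors:
     * the full barrier pairs with the smp_rmb() in the xmit path, so
     * the stopped check cannot be reordered before the freed space
     * becomes visible
     */
    smp_mb();
    if (unlikely(__netif_subqueue_stopped(netdev, qi)) &&
        ionic_q_has_space(q, MAX_SKB_FRAGS + 1)) {
            netif_wake_subqueue(netdev, qi);
            q->wake++;
    }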
>
>> + }
>> + }
>> +
>> + if (skb_is_gso(skb))
>> + err = ionic_tx_tso(q, skb);
>> + else
>> + err = ionic_tx(q, skb);
>> +
>> + if (err)
>> + goto err_out_drop;
> .. at this point if you can't guarantee fitting biggest possible frame
> in, you have to stop the ring.
Yep, that would work.
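Something along these lines at the tail of ionic_start_xmit, a sketch
where IONIC_TX_MAX_DESCS is a placeholder for the real worst-case
per-frame descriptor count:

    /* stop proactively after posting if the next worst-case frame
     * (e.g. a maximal TSO burst) might not fit; the wake side in the
     * clean path restarts the queue once space is reclaimed
     */
    if (unlikely(!ionic_q_has_space(q, IONIC_TX_MAX_DESCS))) {
            netif_stop_subqueue(netdev, queue_index);
            q->stop++;
    }

    return NETDEV_TX_OK;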
Thanks,
sln