Message-ID: <61004560-8dae-9abd-5362-d5ec4d846f87@intel.com>
Date: Tue, 17 Jan 2023 16:19:43 -0800
From: Jacob Keller <jacob.e.keller@...el.com>
To: Robert Hancock <robert.hancock@...ian.com>,
Nicolas Ferre <nicolas.ferre@...rochip.com>,
Claudiu Beznea <claudiu.beznea@...rochip.com>,
"David S. Miller" <davem@...emloft.net>,
Eric Dumazet <edumazet@...gle.com>,
Jakub Kicinski <kuba@...nel.org>,
Paolo Abeni <pabeni@...hat.com>,
"Richard Cochran" <richardcochran@...il.com>
CC: <netdev@...r.kernel.org>, <linux-kernel@...r.kernel.org>
Subject: Re: [PATCH net-next] net: macb: simplify TX timestamp handling
On 1/16/2023 2:08 PM, Robert Hancock wrote:
> This driver was capturing the TX timestamp values from the TX ring
> during the TX completion path, but deferring the actual packet TX
> timestamp updating to a workqueue. There does not seem to be much of a
> reason for this with the current state of the driver. Simplify this to
> just do the TX timestamping as part of the TX completion path, to avoid
> the need for the extra timestamp buffer and workqueue.
>
> Signed-off-by: Robert Hancock <robert.hancock@...ian.com>
> ---
Makes sense. I can't see anything that would require a work queue, and
removing that is a big win. (It's caused no end of troubles in some of
our Intel drivers, but we have no choice due to the hardware design :( )
I'm not super familiar with this code, but...
Reviewed-by: Jacob Keller <jacob.e.keller@...el.com>
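
For anyone skimming the thread: the net effect is that macb_tx_complete()
now stamps the skb directly instead of handing it off to a worker.
Condensed from the macb_ptp.c hunk below (the DMA_TXVALID check and the
new warning paths omitted), the new path is essentially:

	void gem_ptp_txstamp(struct macb *bp, struct sk_buff *skb,
			     struct macb_dma_desc *desc)
	{
		struct macb_dma_desc_ptp *desc_ptp = macb_ptp_desc(bp, desc);
		struct skb_shared_hwtstamps shhwtstamps;
		struct timespec64 ts;

		/* ts_1/ts_2 must be read after ctrl (TX_USED check) */
		dma_rmb();
		gem_hw_timestamp(bp, desc_ptp->ts_1, desc_ptp->ts_2, &ts);

		memset(&shhwtstamps, 0, sizeof(shhwtstamps));
		shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
		skb_tstamp_tx(skb, &shhwtstamps);
	}

so the per-queue tx_timestamps[] ring, the head/tail bookkeeping and the
schedule_work() all go away, and the skb is no longer stolen from the
completion path, which can free it as usual.
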
> drivers/net/ethernet/cadence/macb.h | 29 ++-------
> drivers/net/ethernet/cadence/macb_main.c | 16 +++--
> drivers/net/ethernet/cadence/macb_ptp.c | 83 ++++++------------------
> 3 files changed, 34 insertions(+), 94 deletions(-)
>
> diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
> index 9c410f93a103..14dfec4db8f9 100644
> --- a/drivers/net/ethernet/cadence/macb.h
> +++ b/drivers/net/ethernet/cadence/macb.h
> @@ -768,8 +768,6 @@
> #define gem_readl_n(port, reg, idx) (port)->macb_reg_readl((port), GEM_##reg + idx * 4)
> #define gem_writel_n(port, reg, idx, value) (port)->macb_reg_writel((port), GEM_##reg + idx * 4, (value))
>
> -#define PTP_TS_BUFFER_SIZE 128 /* must be power of 2 */
> -
> /* Conditional GEM/MACB macros. These perform the operation to the correct
> * register dependent on whether the device is a GEM or a MACB. For registers
> * and bitfields that are common across both devices, use macb_{read,write}l
> @@ -819,11 +817,6 @@ struct macb_dma_desc_ptp {
> u32 ts_1;
> u32 ts_2;
> };
> -
> -struct gem_tx_ts {
> - struct sk_buff *skb;
> - struct macb_dma_desc_ptp desc_ptp;
> -};
> #endif
>
> /* DMA descriptor bitfields */
> @@ -1224,12 +1217,6 @@ struct macb_queue {
> void *rx_buffers;
> struct napi_struct napi_rx;
> struct queue_stats stats;
> -
> -#ifdef CONFIG_MACB_USE_HWSTAMP
> - struct work_struct tx_ts_task;
> - unsigned int tx_ts_head, tx_ts_tail;
> - struct gem_tx_ts tx_timestamps[PTP_TS_BUFFER_SIZE];
> -#endif
> };
>
> struct ethtool_rx_fs_item {
> @@ -1340,14 +1327,14 @@ enum macb_bd_control {
>
> void gem_ptp_init(struct net_device *ndev);
> void gem_ptp_remove(struct net_device *ndev);
> -int gem_ptp_txstamp(struct macb_queue *queue, struct sk_buff *skb, struct macb_dma_desc *des);
> +void gem_ptp_txstamp(struct macb *bp, struct sk_buff *skb, struct macb_dma_desc *desc);
> void gem_ptp_rxstamp(struct macb *bp, struct sk_buff *skb, struct macb_dma_desc *desc);
> -static inline int gem_ptp_do_txstamp(struct macb_queue *queue, struct sk_buff *skb, struct macb_dma_desc *desc)
> +static inline void gem_ptp_do_txstamp(struct macb *bp, struct sk_buff *skb, struct macb_dma_desc *desc)
> {
> - if (queue->bp->tstamp_config.tx_type == TSTAMP_DISABLED)
> - return -ENOTSUPP;
> + if (bp->tstamp_config.tx_type == TSTAMP_DISABLED)
> + return;
>
> - return gem_ptp_txstamp(queue, skb, desc);
> + gem_ptp_txstamp(bp, skb, desc);
> }
>
> static inline void gem_ptp_do_rxstamp(struct macb *bp, struct sk_buff *skb, struct macb_dma_desc *desc)
> @@ -1363,11 +1350,7 @@ int gem_set_hwtst(struct net_device *dev, struct ifreq *ifr, int cmd);
> static inline void gem_ptp_init(struct net_device *ndev) { }
> static inline void gem_ptp_remove(struct net_device *ndev) { }
>
> -static inline int gem_ptp_do_txstamp(struct macb_queue *queue, struct sk_buff *skb, struct macb_dma_desc *desc)
> -{
> - return -1;
> -}
> -
> +static inline void gem_ptp_do_txstamp(struct macb *bp, struct sk_buff *skb, struct macb_dma_desc *desc) { }
> static inline void gem_ptp_do_rxstamp(struct macb *bp, struct sk_buff *skb, struct macb_dma_desc *desc) { }
> #endif
>
> diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
> index 95667b979fab..6a0419acac9d 100644
> --- a/drivers/net/ethernet/cadence/macb_main.c
> +++ b/drivers/net/ethernet/cadence/macb_main.c
> @@ -1191,13 +1191,9 @@ static int macb_tx_complete(struct macb_queue *queue, int budget)
> /* First, update TX stats if needed */
> if (skb) {
> if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
> - !ptp_one_step_sync(skb) &&
> - gem_ptp_do_txstamp(queue, skb, desc) == 0) {
> - /* skb now belongs to timestamp buffer
> - * and will be removed later
> - */
> - tx_skb->skb = NULL;
> - }
> + !ptp_one_step_sync(skb))
> + gem_ptp_do_txstamp(bp, skb, desc);
> +
> netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n",
> macb_tx_ring_wrap(bp, tail),
> skb->data);
> @@ -2260,6 +2256,12 @@ static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
> return ret;
> }
>
> +#ifdef CONFIG_MACB_USE_HWSTAMP
> + if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
> + (bp->hw_dma_cap & HW_DMA_CAP_PTP))
> + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
> +#endif
> +
> is_lso = (skb_shinfo(skb)->gso_size != 0);
>
> if (is_lso) {
> diff --git a/drivers/net/ethernet/cadence/macb_ptp.c b/drivers/net/ethernet/cadence/macb_ptp.c
> index e6cb20aaa76a..f962a95068a0 100644
> --- a/drivers/net/ethernet/cadence/macb_ptp.c
> +++ b/drivers/net/ethernet/cadence/macb_ptp.c
> @@ -292,79 +292,39 @@ void gem_ptp_rxstamp(struct macb *bp, struct sk_buff *skb,
> }
> }
>
> -static void gem_tstamp_tx(struct macb *bp, struct sk_buff *skb,
> - struct macb_dma_desc_ptp *desc_ptp)
> +void gem_ptp_txstamp(struct macb *bp, struct sk_buff *skb,
> + struct macb_dma_desc *desc)
> {
> struct skb_shared_hwtstamps shhwtstamps;
> - struct timespec64 ts;
> -
> - gem_hw_timestamp(bp, desc_ptp->ts_1, desc_ptp->ts_2, &ts);
> - memset(&shhwtstamps, 0, sizeof(shhwtstamps));
> - shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
> - skb_tstamp_tx(skb, &shhwtstamps);
> -}
> -
> -int gem_ptp_txstamp(struct macb_queue *queue, struct sk_buff *skb,
> - struct macb_dma_desc *desc)
> -{
> - unsigned long tail = READ_ONCE(queue->tx_ts_tail);
> - unsigned long head = queue->tx_ts_head;
> struct macb_dma_desc_ptp *desc_ptp;
> - struct gem_tx_ts *tx_timestamp;
> -
> - if (!GEM_BFEXT(DMA_TXVALID, desc->ctrl))
> - return -EINVAL;
> + struct timespec64 ts;
>
> - if (CIRC_SPACE(head, tail, PTP_TS_BUFFER_SIZE) == 0)
> - return -ENOMEM;
> + if (!GEM_BFEXT(DMA_TXVALID, desc->ctrl)) {
> + dev_warn_ratelimited(&bp->pdev->dev,
> + "Timestamp not set in TX BD as expected\n");
> + return;
> + }
>
> - desc_ptp = macb_ptp_desc(queue->bp, desc);
> + desc_ptp = macb_ptp_desc(bp, desc);
> /* Unlikely but check */
> - if (!desc_ptp)
> - return -EINVAL;
> - skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
> - tx_timestamp = &queue->tx_timestamps[head];
> - tx_timestamp->skb = skb;
> + if (!desc_ptp) {
> + dev_warn_ratelimited(&bp->pdev->dev,
> + "Timestamp not supported in BD\n");
> + return;
> + }
> +
> /* ensure ts_1/ts_2 is loaded after ctrl (TX_USED check) */
> dma_rmb();
> - tx_timestamp->desc_ptp.ts_1 = desc_ptp->ts_1;
> - tx_timestamp->desc_ptp.ts_2 = desc_ptp->ts_2;
> - /* move head */
> - smp_store_release(&queue->tx_ts_head,
> - (head + 1) & (PTP_TS_BUFFER_SIZE - 1));
> -
> - schedule_work(&queue->tx_ts_task);
> - return 0;
> -}
> + gem_hw_timestamp(bp, desc_ptp->ts_1, desc_ptp->ts_2, &ts);
>
> -static void gem_tx_timestamp_flush(struct work_struct *work)
> -{
> - struct macb_queue *queue =
> - container_of(work, struct macb_queue, tx_ts_task);
> - unsigned long head, tail;
> - struct gem_tx_ts *tx_ts;
> -
> - /* take current head */
> - head = smp_load_acquire(&queue->tx_ts_head);
> - tail = queue->tx_ts_tail;
> -
> - while (CIRC_CNT(head, tail, PTP_TS_BUFFER_SIZE)) {
> - tx_ts = &queue->tx_timestamps[tail];
> - gem_tstamp_tx(queue->bp, tx_ts->skb, &tx_ts->desc_ptp);
> - /* cleanup */
> - dev_kfree_skb_any(tx_ts->skb);
> - /* remove old tail */
> - smp_store_release(&queue->tx_ts_tail,
> - (tail + 1) & (PTP_TS_BUFFER_SIZE - 1));
> - tail = queue->tx_ts_tail;
> - }
> + memset(&shhwtstamps, 0, sizeof(shhwtstamps));
> + shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
> + skb_tstamp_tx(skb, &shhwtstamps);
> }
>
> void gem_ptp_init(struct net_device *dev)
> {
> struct macb *bp = netdev_priv(dev);
> - struct macb_queue *queue;
> - unsigned int q;
>
> bp->ptp_clock_info = gem_ptp_caps_template;
>
> @@ -384,11 +344,6 @@ void gem_ptp_init(struct net_device *dev)
> }
>
> spin_lock_init(&bp->tsu_clk_lock);
> - for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
> - queue->tx_ts_head = 0;
> - queue->tx_ts_tail = 0;
> - INIT_WORK(&queue->tx_ts_task, gem_tx_timestamp_flush);
> - }
>
> gem_ptp_init_tsu(bp);
>
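
Unrelated to the patch itself, but in case it saves anyone time when
testing: below is an untested, illustrative userspace sketch for checking
that hardware TX timestamps still come back on the socket error queue
after this change. The destination address, port and the prior
"hwstamp_ctl -i eth0 -t 1" step are placeholders/assumptions on my side,
not anything taken from the patch.

	/* Untested sketch: request HW TX timestamps on a UDP socket and
	 * read them back from MSG_ERRQUEUE. Assumes TX timestamping was
	 * already enabled on the interface (e.g. hwstamp_ctl from linuxptp).
	 */
	#include <linux/net_tstamp.h>
	#include <linux/errqueue.h>
	#include <sys/socket.h>
	#include <netinet/in.h>
	#include <arpa/inet.h>
	#include <stdio.h>

	int main(void)
	{
		int fd = socket(AF_INET, SOCK_DGRAM, 0);
		int flags = SOF_TIMESTAMPING_TX_HARDWARE |
			    SOF_TIMESTAMPING_RAW_HARDWARE;
		struct sockaddr_in dst = { .sin_family = AF_INET,
					   .sin_port = htons(319) };
		union { char buf[256]; struct cmsghdr align; } ctrl;
		char pkt[64] = { 0 };
		struct msghdr msg = { 0 };
		struct cmsghdr *cm;

		if (fd < 0)
			return 1;
		inet_pton(AF_INET, "192.0.2.1", &dst.sin_addr); /* placeholder peer */
		setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &flags, sizeof(flags));
		sendto(fd, pkt, sizeof(pkt), 0, (struct sockaddr *)&dst, sizeof(dst));

		msg.msg_control = ctrl.buf;
		msg.msg_controllen = sizeof(ctrl.buf);
		if (recvmsg(fd, &msg, MSG_ERRQUEUE) < 0) /* may need to poll/retry */
			return 1;
		for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
			if (cm->cmsg_level == SOL_SOCKET &&
			    cm->cmsg_type == SO_TIMESTAMPING) {
				struct scm_timestamping *tss = (void *)CMSG_DATA(cm);

				/* ts[2] holds the raw hardware timestamp */
				printf("hw ts: %lld.%09ld\n",
				       (long long)tss->ts[2].tv_sec,
				       tss->ts[2].tv_nsec);
			}
		}
		return 0;
	}
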