Message-ID: <8d93566da4dfa0917ac2aac11866795c3d5761ae.camel@mellanox.com>
Date:   Wed, 24 Jul 2019 19:56:59 +0000
From:   Saeed Mahameed <saeedm@...lanox.com>
To:     "linux-can@...r.kernel.org" <linux-can@...r.kernel.org>,
        "jhofstee@...tronenergy.com" <jhofstee@...tronenergy.com>
CC:     "linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>,
        "wg@...ndegger.com" <wg@...ndegger.com>,
        "anilkumar@...com" <anilkumar@...com>,
        "anantgole@...com" <anantgole@...com>,
        "davem@...emloft.net" <davem@...emloft.net>,
        "mkl@...gutronix.de" <mkl@...gutronix.de>,
        "netdev@...r.kernel.org" <netdev@...r.kernel.org>
Subject: Re: [PATCH] can: ti_hecc: use timestamp based rx-offloading

On Mon, 2019-04-29 at 12:03 +0000, Jeroen Hofstee wrote:
> As already mentioned in [1] and included in [2], there is an off-by-one
> issue: the high bank is already enabled when the _next_ mailbox to be
> read has index 12, so the mailbox actually being read was 13. A message
> can therefore go into mailbox 31, and the driver will be re-polled
> until mailbox 12 eventually receives a message. Or the message might
> end up in the 12th mailbox, but then it would become disabled after
> being read and only be enabled again in the next "round", after mailbox
> 13 was read. This can cause out-of-order messages, since the
> lower-priority mailboxes can accept messages in the meantime.
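For reference, a minimal sketch of where that off-by-one sits,
simplified from the ti_hecc_rx_poll() logic this patch removes (see the
hunk further down; illustration only, not the driver code verbatim):

	/* rx scan runs from mailbox 31 down towards the TX mailboxes */
	--priv->rx_next;
	if (priv->rx_next == HECC_RX_BUFFER_MBOX) {
		/* The next mailbox to read is 12, i.e. mailbox 13 was just
		 * read: the high bank (mailboxes 12..31) is re-enabled here
		 * while mailbox 12 itself is still pending -- one iteration
		 * too early.
		 */
		hecc_set_bit(priv, HECC_CANME, HECC_RX_HIGH_MBOX_MASK);
	}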
> 
> As mentioned in [3] there is a hardware race condition when changing
> the CANME register while messages are being received. Even when
> including a busy poll on reception, like in [2], there are still
> overflows and out-of-order messages at times, but fewer than without
> the busy-loop polling. Unlike what that patch suggests, the polling
> time is not in the microsecond range; it takes as long as an ongoing
> CAN bus reception needs to finish, so typically more in the
> fraction-of-a-millisecond range. Since the timeout is in jiffies, it
> won't time out.
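To make the jiffies point concrete: a jiffy is one scheduler tick
(typically 1-10 ms), while even a worst-case frame at 250 kbit/s is on
the wire for well under a millisecond, so a bounded busy poll in the
style below never actually hits its timeout. (A sketch of the pattern
only, not the exact code added in [2]; mbx_mask is assumed to be the
mailbox bit being waited for.)

	unsigned long timeout = jiffies + 1;	/* about one tick */

	/* busy-wait for the ongoing reception to finish */
	while (!(hecc_read(priv, HECC_CANRMP) & mbx_mask)) {
		if (time_after(jiffies, timeout))
			break;	/* practically unreachable at CAN bit rates */
		cpu_relax();
	}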
> 
> Even with these additional fixes the driver is still not able to
> provide a proper FIFO that doesn't drop packets. So change the driver
> to use rx-offload and base the ordering on timestamps instead of
> mailbox numbers. As a side effect, this also fixes [4] and [5].
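The mailbox_read side is in the hunks below; the probe-side wiring this
implies would presumably look something like the following sketch (the
mb_first/mb_last values and the error label are illustrative, not
quoted from this patch):

	priv->offload.mailbox_read = ti_hecc_mailbox_read;
	priv->offload.mb_first = HECC_RX_FIRST_MBOX;	/* scan high to low */
	priv->offload.mb_last = HECC_MAX_TX_MBOX;
	err = can_rx_offload_add_timestamp(ndev, &priv->offload);
	if (err)
		goto probe_exit_candev;	/* illustrative label */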
> 
> Before this change, messages with a single byte counter were dropped /
> received out of order at a bitrate of 250 kbit/s on an am3517. With
> this patch that no longer occurs up to and including 1 Mbit/s.
> 
> [1] https://linux-can.vger.kernel.narkive.com/zgO9inVi/patch-can-ti-hecc-fix-rx-wrong-sequence-issue#post6
> [2] http://arago-project.org/git/projects/?p=linux-omap3.git;a=commit;h=02346892777f07245de4d5af692513ebd852dcb2
> [3] https://linux-can.vger.kernel.narkive.com/zgO9inVi/patch-can-ti-hecc-fix-rx-wrong-sequence-issue#post5
> [4] https://patchwork.ozlabs.org/patch/895956/
> [5] https://www.spinics.net/lists/netdev/msg494971.html
> 
> Cc: Anant Gole <anantgole@...com>
> Cc: AnilKumar Ch <anilkumar@...com>
> Signed-off-by: Jeroen Hofstee <jhofstee@...tronenergy.com>
> ---
>  drivers/net/can/ti_hecc.c | 189 +++++++++++++---------------------------------
>  1 file changed, 53 insertions(+), 136 deletions(-)
> 
> diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
> index db6ea93..fe7ffff 100644
> --- a/drivers/net/can/ti_hecc.c
> +++ b/drivers/net/can/ti_hecc.c
> @@ -5,6 +5,7 @@
>   * specs for the same is available at <http://www.ti.com>
>   *
>   * Copyright (C) 2009 Texas Instruments Incorporated - http://www.ti.com/
> + * Copyright (C) 2019 Jeroen Hofstee <jhofstee@...tronenergy.com>
>   *
>   * This program is free software; you can redistribute it and/or
>   * modify it under the terms of the GNU General Public License as
> @@ -34,6 +35,7 @@
>  #include <linux/can/dev.h>
>  #include <linux/can/error.h>
>  #include <linux/can/led.h>
> +#include <linux/can/rx-offload.h>
>  
>  #define DRV_NAME "ti_hecc"
>  #define HECC_MODULE_VERSION     "0.7"
> @@ -63,29 +65,16 @@ MODULE_VERSION(HECC_MODULE_VERSION);
>  #define HECC_TX_PRIO_MASK	(MAX_TX_PRIO << HECC_MB_TX_SHIFT)
>  #define HECC_TX_MB_MASK		(HECC_MAX_TX_MBOX - 1)
>  #define HECC_TX_MASK		((HECC_MAX_TX_MBOX - 1) | HECC_TX_PRIO_MASK)
> -#define HECC_TX_MBOX_MASK	(~(BIT(HECC_MAX_TX_MBOX) - 1))
> -#define HECC_DEF_NAPI_WEIGHT	HECC_MAX_RX_MBOX
>  
>  /*
> - * Important Note: RX mailbox configuration
> - * RX mailboxes are further logically split into two - main and buffer
> - * mailboxes. The goal is to get all packets into main mailboxes as
> - * driven by mailbox number and receive priority (higher to lower) and
> - * buffer mailboxes are used to receive pkts while main mailboxes are being
> - * processed. This ensures in-order packet reception.
> - *
> - * Here are the recommended values for buffer mailbox. Note that RX mailboxes
> - * start after TX mailboxes:
> - *
> - * HECC_MAX_RX_MBOX		HECC_RX_BUFFER_MBOX	No of buffer mailboxes
> - * 28				12			8
> - * 16				20			4
> + * RX mailbox configuration
> + * The remaining mailboxes are used for reception and are delivered based on
> + * their timestamp, to avoid a hardware race when CANME is changed while
> + * CAN-bus traffic is being received.
>   */
>  
>  #define HECC_MAX_RX_MBOX	(HECC_MAX_MAILBOXES - HECC_MAX_TX_MBOX)
> -#define HECC_RX_BUFFER_MBOX	12 /* as per table above */
>  #define HECC_RX_FIRST_MBOX	(HECC_MAX_MAILBOXES - 1)
> -#define HECC_RX_HIGH_MBOX_MASK	(~(BIT(HECC_RX_BUFFER_MBOX) - 1))
>  
>  /* TI HECC module registers */
>  #define HECC_CANME		0x0	/* Mailbox enable */
> @@ -123,6 +112,8 @@ MODULE_VERSION(HECC_MODULE_VERSION);
>  #define HECC_CANMDL		0x8
>  #define HECC_CANMDH		0xC
>  
> +#define HECC_CANMOTS		0x100
> +
>  #define HECC_SET_REG		0xFFFFFFFF
>  #define HECC_CANID_MASK		0x3FF	/* 18 bits mask for extended id's */
>  #define HECC_CCE_WAIT_COUNT     100	/* Wait for ~1 sec for CCE bit */
> @@ -193,7 +184,7 @@ static const struct can_bittiming_const ti_hecc_bittiming_const = {
>  
>  struct ti_hecc_priv {
>  	struct can_priv can;	/* MUST be first member/field */
> -	struct napi_struct napi;
> +	struct can_rx_offload offload;
>  	struct net_device *ndev;
>  	struct clk *clk;
>  	void __iomem *base;
> @@ -203,7 +194,6 @@ struct ti_hecc_priv {
>  	spinlock_t mbx_lock; /* CANME register needs protection */
>  	u32 tx_head;
>  	u32 tx_tail;
> -	u32 rx_next;
>  	struct regulator *reg_xceiver;
>  };
>  
> @@ -265,6 +255,11 @@ static inline u32 hecc_get_bit(struct ti_hecc_priv *priv, int reg, u32 bit_mask)
>  	return (hecc_read(priv, reg) & bit_mask) ? 1 : 0;
>  }
>  
> +static inline u32 hecc_read_stamp(struct ti_hecc_priv *priv, u32 mbxno)
> +{
> +	return __raw_readl(priv->hecc_ram + 0x80 + 4 * mbxno);
> +}
> +
>  static int ti_hecc_set_btc(struct ti_hecc_priv *priv)
>  {
>  	struct can_bittiming *bit_timing = &priv->can.bittiming;
> @@ -375,7 +370,6 @@ static void ti_hecc_start(struct net_device *ndev)
>  	ti_hecc_reset(ndev);
>  
>  	priv->tx_head = priv->tx_tail = HECC_TX_MASK;
> -	priv->rx_next = HECC_RX_FIRST_MBOX;
>  
>  	/* Enable local and global acceptance mask registers */
>  	hecc_write(priv, HECC_CANGAM, HECC_SET_REG);
> @@ -526,21 +520,17 @@ static netdev_tx_t ti_hecc_xmit(struct sk_buff *skb, struct net_device *ndev)
>  	return NETDEV_TX_OK;
>  }
>  
> -static int ti_hecc_rx_pkt(struct ti_hecc_priv *priv, int mbxno)
> +static inline struct ti_hecc_priv *rx_offload_to_priv(struct can_rx_offload *offload)
>  {
> -	struct net_device_stats *stats = &priv->ndev->stats;
> -	struct can_frame *cf;
> -	struct sk_buff *skb;
> -	u32 data, mbx_mask;
> -	unsigned long flags;
> +	return container_of(offload, struct ti_hecc_priv, offload);
> +}
>  
> -	skb = alloc_can_skb(priv->ndev, &cf);
> -	if (!skb) {
> -		if (printk_ratelimit())
> -			netdev_err(priv->ndev,
> -				"ti_hecc_rx_pkt: alloc_can_skb() failed\n");
> -		return -ENOMEM;
> -	}
> +static unsigned int ti_hecc_mailbox_read(struct can_rx_offload *offload,
> +					 struct can_frame *cf,
> +					 u32 *timestamp, unsigned int mbxno)
> +{
> +	struct ti_hecc_priv *priv = rx_offload_to_priv(offload);
> +	u32 data, mbx_mask;
>  
>  	mbx_mask = BIT(mbxno);
>  	data = hecc_read_mbx(priv, mbxno, HECC_CANMID);
> @@ -558,100 +548,19 @@ static int ti_hecc_rx_pkt(struct ti_hecc_priv *priv, int mbxno)
>  		data = hecc_read_mbx(priv, mbxno, HECC_CANMDH);
>  		*(__be32 *)(cf->data + 4) = cpu_to_be32(data);
>  	}
> -	spin_lock_irqsave(&priv->mbx_lock, flags);
> -	hecc_clear_bit(priv, HECC_CANME, mbx_mask);
> -	hecc_write(priv, HECC_CANRMP, mbx_mask);
> -	/* enable mailbox only if it is part of rx buffer mailboxes */
> -	if (priv->rx_next < HECC_RX_BUFFER_MBOX)
> -		hecc_set_bit(priv, HECC_CANME, mbx_mask);
> -	spin_unlock_irqrestore(&priv->mbx_lock, flags);
>  
> -	stats->rx_bytes += cf->can_dlc;
> -	can_led_event(priv->ndev, CAN_LED_EVENT_RX);
> -	netif_receive_skb(skb);
> -	stats->rx_packets++;
> +	*timestamp = hecc_read_stamp(priv, mbxno);
>  
> -	return 0;
> -}
> -
> -/*
> - * ti_hecc_rx_poll - HECC receive pkts
> - *
> - * The receive mailboxes start from highest numbered mailbox till last xmit
> - * mailbox. On CAN frame reception the hardware places the data into highest
> - * numbered mailbox that matches the CAN ID filter. Since all receive mailboxes
> - * have same filtering (ALL CAN frames) packets will arrive in the highest
> - * available RX mailbox and we need to ensure in-order packet reception.
> - *
> - * To ensure the packets are received in the right order we logically divide
> - * the RX mailboxes into main and buffer mailboxes. Packets are received as per
> - * mailbox priotity (higher to lower) in the main bank and once it is full we
> - * disable further reception into main mailboxes. While the main mailboxes are
> - * processed in NAPI, further packets are received in buffer mailboxes.
> - *
> - * We maintain a RX next mailbox counter to process packets and once all main
> - * mailboxe packets are passed to the upper stack we enable all of them but
> - * continue to process packets received in buffer mailboxes. With each packet
> - * received from buffer mailbox we enable it immediately so as to handle the
> - * overflow from higher mailboxes.
> - */
> -static int ti_hecc_rx_poll(struct napi_struct *napi, int quota)
> -{
> -	struct net_device *ndev = napi->dev;
> -	struct ti_hecc_priv *priv = netdev_priv(ndev);
> -	u32 num_pkts = 0;
> -	u32 mbx_mask;
> -	unsigned long pending_pkts, flags;
> -
> -	if (!netif_running(ndev))
> -		return 0;
> -
> -	while ((pending_pkts = hecc_read(priv, HECC_CANRMP)) &&
> -		num_pkts < quota) {
> -		mbx_mask = BIT(priv->rx_next); /* next rx mailbox to process */
> -		if (mbx_mask & pending_pkts) {
> -			if (ti_hecc_rx_pkt(priv, priv->rx_next) < 0)
> -				return num_pkts;
> -			++num_pkts;
> -		} else if (priv->rx_next > HECC_RX_BUFFER_MBOX) {
> -			break; /* pkt not received yet */
> -		}
> -		--priv->rx_next;
> -		if (priv->rx_next == HECC_RX_BUFFER_MBOX) {
> -			/* enable high bank mailboxes */
> -			spin_lock_irqsave(&priv->mbx_lock, flags);
> -			mbx_mask = hecc_read(priv, HECC_CANME);
> -			mbx_mask |= HECC_RX_HIGH_MBOX_MASK;
> -			hecc_write(priv, HECC_CANME, mbx_mask);
> -			spin_unlock_irqrestore(&priv->mbx_lock, flags);
> -		} else if (priv->rx_next == HECC_MAX_TX_MBOX - 1) {
> -			priv->rx_next = HECC_RX_FIRST_MBOX;
> -			break;
> -		}
> -	}
> -
> -	/* Enable packet interrupt if all pkts are handled */
> -	if (hecc_read(priv, HECC_CANRMP) == 0) {
> -		napi_complete(napi);
> -		/* Re-enable RX mailbox interrupts */
> -		mbx_mask = hecc_read(priv, HECC_CANMIM);
> -		mbx_mask |= HECC_TX_MBOX_MASK;
> -		hecc_write(priv, HECC_CANMIM, mbx_mask);
> -	} else {
> -		/* repoll is done only if whole budget is used */
> -		num_pkts = quota;
> -	}
> -
> -	return num_pkts;
> +	return 1;
>  }
>  
>  static int ti_hecc_error(struct net_device *ndev, int int_status,
>  	int err_status)
>  {
>  	struct ti_hecc_priv *priv = netdev_priv(ndev);
> -	struct net_device_stats *stats = &ndev->stats;
>  	struct can_frame *cf;
>  	struct sk_buff *skb;
> +	u32 timestamp;
>  
>  	/* propagate the error condition to the can stack */
>  	skb = alloc_can_err_skb(ndev, &cf);
> @@ -732,9 +641,8 @@ static int ti_hecc_error(struct net_device *ndev, int int_status,
>  		}
>  	}
>  
> -	stats->rx_packets++;
> -	stats->rx_bytes += cf->can_dlc;
> -	netif_rx(skb);
> +	timestamp = hecc_read(priv, HECC_CANLNT);
> +	can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
>  
>  	return 0;
>  }
> @@ -744,8 +652,8 @@ static irqreturn_t ti_hecc_interrupt(int irq, void *dev_id)
>  	struct net_device *ndev = (struct net_device *)dev_id;
>  	struct ti_hecc_priv *priv = netdev_priv(ndev);
>  	struct net_device_stats *stats = &ndev->stats;
> -	u32 mbxno, mbx_mask, int_status, err_status;
> -	unsigned long ack, flags;
> +	u32 mbxno, mbx_mask, int_status, err_status, stamp;

Reverse xmas tree, please: order the local variable declarations from
the longest line to the shortest.
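That is, with "stamp" added the u32 line becomes longer than the two
struct lines above it. One possible ordering, longest line to shortest,
keeping the ndev initialization first since priv and stats depend on it
(a sketch; the quoted hunk is trimmed, so the remaining declarations
may differ):

	struct net_device *ndev = (struct net_device *)dev_id;
	u32 mbxno, mbx_mask, int_status, err_status, stamp;
	struct ti_hecc_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	unsigned long ack, flags;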
