lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Date:   Tue, 20 Sep 2022 00:27:56 +0000
From:   Vladimir Oltean <vladimir.oltean@....com>
To:     Andrew Lunn <andrew@...n.ch>
CC:     "mattias.forsblad@...il.com" <mattias.forsblad@...il.com>,
        netdev <netdev@...r.kernel.org>,
        Florian Fainelli <f.fainelli@...il.com>,
        Christian Marangi <ansuelsmth@...il.com>
Subject: Re: [PATCH rfc v0 8/9] net: dsa: qca8k: Pass response buffer via
 dsa_rmu_request

On Tue, Sep 20, 2022 at 12:18:52AM +0200, Andrew Lunn wrote:
> Make the calling of operations on the switch more like a request
> response API by passing the address of the response buffer, rather
> than making use of global state.
> 
> To avoid race conditions with the completion timeout, and late
> arriving responses, protect the resp members via a mutex.

Cannot be a mutex; the context of qca8k_rw_reg_ack_handler(), caller of
dsa_inband_complete(), is NET_RX softirq and that is not sleepable.

> 
> The qca8k response frame has an odd layout, the reply is not
> contiguous. Use a small intermediary buffer to convert the reply into
> something which can be memcpy'ed.
> 
> Signed-off-by: Andrew Lunn <andrew@...n.ch>
> ---
>  drivers/net/dsa/qca/qca8k-8xxx.c | 31 ++++++++++++++++++++-----------
>  drivers/net/dsa/qca/qca8k.h      |  1 -
>  include/net/dsa.h                |  7 ++++++-
>  net/dsa/dsa.c                    | 24 +++++++++++++++++++++++-
>  4 files changed, 49 insertions(+), 14 deletions(-)
> 
> diff --git a/drivers/net/dsa/qca/qca8k-8xxx.c b/drivers/net/dsa/qca/qca8k-8xxx.c
> index 55a781851e28..234d79a09e78 100644
> --- a/drivers/net/dsa/qca/qca8k-8xxx.c
> +++ b/drivers/net/dsa/qca/qca8k-8xxx.c
> @@ -138,6 +138,7 @@ static void qca8k_rw_reg_ack_handler(struct dsa_switch *ds, struct sk_buff *skb)
>  	struct qca8k_priv *priv = ds->priv;
>  	struct qca_mgmt_ethhdr *mgmt_ethhdr;
>  	u8 len, cmd;
> +	u32 data[4];
>  	int err = 0;
>  
>  	mgmt_ethhdr = (struct qca_mgmt_ethhdr *)skb_mac_header(skb);
> @@ -151,17 +152,16 @@ static void qca8k_rw_reg_ack_handler(struct dsa_switch *ds, struct sk_buff *skb)
>  		err = -EPROTO;
>  
>  	if (cmd == MDIO_READ) {
> -		mgmt_eth_data->data[0] = mgmt_ethhdr->mdio_data;
> +		data[0] = mgmt_ethhdr->mdio_data;
>  
>  		/* Get the rest of the 12 byte of data.
>  		 * The read/write function will extract the requested data.
>  		 */
>  		if (len > QCA_HDR_MGMT_DATA1_LEN)
> -			memcpy(mgmt_eth_data->data + 1, skb->data,
> -			       QCA_HDR_MGMT_DATA2_LEN);
> +			memcpy(&data[1], skb->data, QCA_HDR_MGMT_DATA2_LEN);
>  	}
>  
> -	dsa_inband_complete(&mgmt_eth_data->inband, err);
> +	dsa_inband_complete(&mgmt_eth_data->inband, &data, sizeof(data), err);
>  }
>  
>  static struct sk_buff *qca8k_alloc_mdio_header(enum mdio_cmd cmd, u32 reg, u32 *val,
> @@ -230,6 +230,7 @@ static int qca8k_read_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
>  {
>  	struct qca8k_mgmt_eth_data *mgmt_eth_data = &priv->mgmt_eth_data;
>  	struct sk_buff *skb;
> +	u32 data[4];
>  	int ret;
>  
>  	skb = qca8k_alloc_mdio_header(MDIO_READ, reg, NULL,
> @@ -249,12 +250,13 @@ static int qca8k_read_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
>  	skb->dev = priv->mgmt_master;
>  
>  	ret = dsa_inband_request(&mgmt_eth_data->inband, skb,
> -			      qca8k_mdio_header_fill_seq_num,
> -			      QCA8K_ETHERNET_TIMEOUT);

Argument list should have been properly aligned when this patch set introduced it.

> +				 qca8k_mdio_header_fill_seq_num,
> +				 &data, sizeof(data),
> +				 QCA8K_ETHERNET_TIMEOUT);

Kind of feeling the need for an error check right here, instead of
proceeding to look at the buffer.

>  
> -	*val = mgmt_eth_data->data[0];
> +	*val = data[0];
>  	if (len > QCA_HDR_MGMT_DATA1_LEN)
> -		memcpy(val + 1, mgmt_eth_data->data + 1, len - QCA_HDR_MGMT_DATA1_LEN);
> +		memcpy(val + 1, &data[1], len - QCA_HDR_MGMT_DATA1_LEN);

This is pretty hard to digest, but it looks like it could work.
So this can run concurrently with qca8k_rw_reg_ack_handler(), but since
the end of dsa_inband_request() sets inband->resp to NULL, then even if
the response comes later, it won't touch the driver-provided on-stack
buffer, since the DSA completion structure lost the reference to it.

How do we deal with the response being processed so late by the handler
that it overlaps with the dsa_inband_request() call of the next seqid?
We open up to another window of opportunity for the handler to have a
valid buffer and length to which it can copy stuff. Does it matter,
since the seqid of the response will be smaller than that of the request?
Is reordering on multi-CPU, multi-queue masters handled in any way? This
will be a problem regardless of QoS - currently we assume that all
management frames are treated the same by the DSA master. But it has no
insight into the DSA header format, so why would it? It could be doing
RSS and even find some entropy in our seqid junk data. It's a bit late
to think this through right now.

>  
>  	mutex_unlock(&mgmt_eth_data->mutex);
>  
> @@ -285,6 +287,7 @@ static int qca8k_write_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
>  
>  	ret = dsa_inband_request(&mgmt_eth_data->inband, skb,
>  				 qca8k_mdio_header_fill_seq_num,
> +				 NULL, 0,
>  				 QCA8K_ETHERNET_TIMEOUT);
>  
>  	mutex_unlock(&mgmt_eth_data->mutex);
> @@ -412,16 +415,18 @@ qca8k_phy_eth_busy_wait(struct qca8k_mgmt_eth_data *mgmt_eth_data,
>  			struct sk_buff *read_skb, u32 *val)
>  {
>  	struct sk_buff *skb = skb_copy(read_skb, GFP_KERNEL);
> +	u32 data[4];
>  	int ret;
>  
>  	ret = dsa_inband_request(&mgmt_eth_data->inband, skb,
>  				 qca8k_mdio_header_fill_seq_num,
> +				 &data, sizeof(data),
>  				 QCA8K_ETHERNET_TIMEOUT);
>  
>  	if (ret)
>  		return ret;
>  
> -	*val = mgmt_eth_data->data[0];
> +	*val = data[0];
>  
>  	return 0;
>  }
> @@ -434,6 +439,7 @@ qca8k_phy_eth_command(struct qca8k_priv *priv, bool read, int phy,
>  	struct qca8k_mgmt_eth_data *mgmt_eth_data;
>  	u32 write_val, clear_val = 0, val;
>  	struct net_device *mgmt_master;
> +	u32 resp_data[4];
>  	int ret, ret1;
>  
>  	if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
> @@ -494,6 +500,7 @@ qca8k_phy_eth_command(struct qca8k_priv *priv, bool read, int phy,
>  
>  	ret = dsa_inband_request(&mgmt_eth_data->inband, write_skb,
>  				 qca8k_mdio_header_fill_seq_num,
> +				 NULL, 0,
>  				 QCA8K_ETHERNET_TIMEOUT);
>  
>  	if (ret) {
> @@ -514,12 +521,13 @@ qca8k_phy_eth_command(struct qca8k_priv *priv, bool read, int phy,
>  	if (read) {
>  		ret = dsa_inband_request(&mgmt_eth_data->inband, read_skb,
>  					 qca8k_mdio_header_fill_seq_num,
> +					 &resp_data, sizeof(resp_data),
>  					 QCA8K_ETHERNET_TIMEOUT);
>  
>  		if (ret)
>  			goto exit;
>  
> -		ret = mgmt_eth_data->data[0] & QCA8K_MDIO_MASTER_DATA_MASK;
> +		ret = resp_data[0] & QCA8K_MDIO_MASTER_DATA_MASK;
>  	} else {
>  		kfree_skb(read_skb);
>  	}
> @@ -527,6 +535,7 @@ qca8k_phy_eth_command(struct qca8k_priv *priv, bool read, int phy,
>  
>  	ret = dsa_inband_request(&mgmt_eth_data->inband, clear_skb,
>  				 qca8k_mdio_header_fill_seq_num,
> +				 NULL, 0,
>  				 QCA8K_ETHERNET_TIMEOUT);
>  
>  	mutex_unlock(&mgmt_eth_data->mutex);
> @@ -1442,7 +1451,7 @@ static void qca8k_mib_autocast_handler(struct dsa_switch *ds, struct sk_buff *sk
>  exit:
>  	/* Complete on receiving all the mib packet */
>  	if (refcount_dec_and_test(&mib_eth_data->port_parsed))
> -		dsa_inband_complete(&mib_eth_data->inband, err);
> +		dsa_inband_complete(&mib_eth_data->inband, NULL, 0, err);
>  }
>  
>  static int
> diff --git a/drivers/net/dsa/qca/qca8k.h b/drivers/net/dsa/qca/qca8k.h
> index 682106206282..70494096e251 100644
> --- a/drivers/net/dsa/qca/qca8k.h
> +++ b/drivers/net/dsa/qca/qca8k.h
> @@ -348,7 +348,6 @@ enum {
>  struct qca8k_mgmt_eth_data {
>  	struct dsa_inband inband;
>  	struct mutex mutex; /* Enforce one mdio read/write at time */
> -	u32 data[4];
>  };
>  
>  struct qca8k_mib_eth_data {
> diff --git a/include/net/dsa.h b/include/net/dsa.h
> index 1a920f89b667..dad9e31d36ce 100644
> --- a/include/net/dsa.h
> +++ b/include/net/dsa.h
> @@ -1285,12 +1285,17 @@ struct dsa_inband {
>  	u32 seqno;
>  	u32 seqno_mask;
>  	int err;
> +	struct mutex resp_lock; /* Protects resp* members */
> +	void *resp;
> +	unsigned int resp_len;

Would be good to be a bit more verbose about what "protecting" means
here (just offering a consistent view of the buffer pointer and of its
length from DSA's perspective).

>  };
>  
>  void dsa_inband_init(struct dsa_inband *inband, u32 seqno_mask);
> -void dsa_inband_complete(struct dsa_inband *inband, int err);
> +void dsa_inband_complete(struct dsa_inband *inband,
> +		      void *resp, unsigned int resp_len, int err);
>  int dsa_inband_request(struct dsa_inband *inband, struct sk_buff *skb,
>  		       void (* insert_seqno)(struct sk_buff *skb, u32 seqno),
> +		       void *resp, unsigned int resp_len,
>  		       int timeout_ms);
>  int dsa_inband_wait_for_completion(struct dsa_inband *inband, int timeout_ms);
>  u32 dsa_inband_seqno(struct dsa_inband *inband);
> diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
> index 0de283ac0bfc..4fa0ab4ae58e 100644
> --- a/net/dsa/dsa.c
> +++ b/net/dsa/dsa.c
> @@ -521,14 +521,24 @@ EXPORT_SYMBOL_GPL(dsa_mdb_present_in_other_db);
>  void dsa_inband_init(struct dsa_inband *inband, u32 seqno_mask)
>  {
>  	init_completion(&inband->completion);
> +	mutex_init(&inband->resp_lock);
>  	inband->seqno_mask = seqno_mask;
>  	inband->seqno = 0;
>  }
>  EXPORT_SYMBOL_GPL(dsa_inband_init);
>  
> -void dsa_inband_complete(struct dsa_inband *inband, int err)
> +void dsa_inband_complete(struct dsa_inband *inband,
> +			 void *resp, unsigned int resp_len,
> +			 int err)
>  {
>  	inband->err = err;
> +
> +	mutex_lock(&inband->resp_lock);
> +	resp_len = min(inband->resp_len, resp_len);

No warning for truncation caused by resp_len > inband->resp_len?
It seems like a valid error. At least I tried to test Mattias' patch
set, and this is one of the problems that really happened.

> +	if (inband->resp && resp)
> +		memcpy(inband->resp, resp, resp_len);
> +	mutex_unlock(&inband->resp_lock);
> +
>  	complete(&inband->completion);
>  }
>  EXPORT_SYMBOL_GPL(dsa_inband_complete);
> @@ -548,6 +558,7 @@ EXPORT_SYMBOL_GPL(dsa_inband_wait_for_completion);
>   */
>  int dsa_inband_request(struct dsa_inband *inband, struct sk_buff *skb,
>  		       void (* insert_seqno)(struct sk_buff *skb, u32 seqno),
> +		       void *resp, unsigned int resp_len,
>  		       int timeout_ms)
>  {
>  	unsigned long jiffies = msecs_to_jiffies(timeout_ms);
> @@ -556,6 +567,11 @@ int dsa_inband_request(struct dsa_inband *inband, struct sk_buff *skb,
>  	reinit_completion(&inband->completion);
>  	inband->err = 0;
>  
> +	mutex_lock(&inband->resp_lock);
> +	inband->resp = resp;
> +	inband->resp_len = resp_len;
> +	mutex_unlock(&inband->resp_lock);
> +
>  	if (insert_seqno) {
>  		inband->seqno++;
>  		insert_seqno(skb, inband->seqno & inband->seqno_mask);
> @@ -564,6 +580,12 @@ int dsa_inband_request(struct dsa_inband *inband, struct sk_buff *skb,
>  	dev_queue_xmit(skb);
>  
>  	ret = wait_for_completion_timeout(&inband->completion, jiffies);
> +
> +	mutex_lock(&inband->resp_lock);
> +	inband->resp = NULL;
> +	inband->resp_len = 0;
> +	mutex_unlock(&inband->resp_lock);
> +
>  	if (ret < 0)
>  		return ret;
>  	if (ret == 0)
> -- 
> 2.37.2
>

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ