Message-Id: <4b88121b8b1aab63c8541c9ed9969315defb4db1.1664384503.git.namcaov@gmail.com>
Date: Wed, 28 Sep 2022 19:21:50 +0200
From: Nam Cao <namcaov@...il.com>
To: forest@...ttletooquiet.net, gregkh@...uxfoundation.org,
dan.carpenter@...cle.com
Cc: namcaov@...il.com, philipp.g.hortmann@...il.com,
linux-kernel@...r.kernel.org, linux-staging@...ts.linux.dev
Subject: [RFC PATCH v2 4/4] staging: vt6655: implement allocation failure handling

The function device_rx_srv() does not handle allocation failure
correctly. Currently, it performs these steps:
- DMA-unmap the buffer and hand it over to mac80211
- Allocate and DMA-map a new buffer
- If allocation fails, abort

The problem is that it aborts while the buffer is still marked as
OWNED_BY_HOST. So when this function is called again later, it
incorrectly perceives the same buffer as valid and DMA-unmaps and hands
this buffer over to mac80211 again.

Re-implement this function to do things in a different order:
- Allocate and DMA-map a new buffer
- If allocation fails, abort and give up ownership of the buffer (so
  that the device can re-use this buffer)
- If allocation succeeds, DMA-unmap the old buffer and hand it over
  to mac80211

Thus, when the driver cannot allocate a new buffer, it simply discards
the received data and re-uses the current buffer.

Also split device_alloc_rx_buf() into two parts: allocating the new
buffer and initializing the descriptor, so that the old buffer can be
read in between.
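
Below is a simplified sketch of the resulting receive-service loop as
implemented by this patch (identifiers are those of the driver; the
works counter and error prints are omitted):

	for (rd = priv->pCurrRD[idx];
	     rd->rd0.owner == OWNED_BY_HOST;
	     rd = rd->next) {
		if (!rd->rd_info->skb)
			break;

		/* Try to get a replacement buffer first. */
		if (!device_alloc_rx_buf(priv, &new_info)) {
			/* Give the buffer back to the NIC and retry later. */
			rd->rd0.owner = OWNED_BY_NIC;
			rd = rd->next;
			break;
		}

		/* Only now DMA-unmap and hand the old buffer to mac80211 ... */
		vnt_receive_frame(priv, rd);

		/* ... then attach the freshly allocated buffer. */
		memcpy(rd->rd_info, &new_info, sizeof(new_info));
		device_init_rx_desc(priv, rd);
	}

	priv->pCurrRD[idx] = rd;
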
Signed-off-by: Nam Cao <namcaov@...il.com>
---
drivers/staging/vt6655/device_main.c | 33 ++++++++++++++++++----------
1 file changed, 21 insertions(+), 12 deletions(-)
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index c8cae6df7f51..cc952acd9825 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -132,7 +132,8 @@ static int device_init_td1_ring(struct vnt_private *priv);
static int device_rx_srv(struct vnt_private *priv, unsigned int idx);
static int device_tx_srv(struct vnt_private *priv, unsigned int idx);
-static bool device_alloc_rx_buf(struct vnt_private *, struct vnt_rx_desc *);
+static bool device_alloc_rx_buf(struct vnt_private *, struct vnt_rd_info *);
+static void device_init_rx_desc(struct vnt_private *priv, struct vnt_rx_desc *rd);
static void device_free_rx_buf(struct vnt_private *priv,
struct vnt_rx_desc *rd);
static void device_init_registers(struct vnt_private *priv);
@@ -611,12 +612,13 @@ static int device_init_rd0_ring(struct vnt_private *priv)
goto err_free_desc;
}
- if (!device_alloc_rx_buf(priv, desc)) {
+ if (!device_alloc_rx_buf(priv, desc->rd_info)) {
dev_err(&priv->pcid->dev, "can not alloc rx bufs\n");
ret = -ENOMEM;
goto err_free_rd;
}
+ device_init_rx_desc(priv, desc);
desc->next = &priv->aRD0Ring[(i + 1) % priv->opts.rx_descs0];
desc->next_desc = cpu_to_le32(curr + sizeof(struct vnt_rx_desc));
}
@@ -657,12 +659,13 @@ static int device_init_rd1_ring(struct vnt_private *priv)
goto err_free_desc;
}
- if (!device_alloc_rx_buf(priv, desc)) {
+ if (!device_alloc_rx_buf(priv, desc->rd_info)) {
dev_err(&priv->pcid->dev, "can not alloc rx bufs\n");
ret = -ENOMEM;
goto err_free_rd;
}
+ device_init_rx_desc(priv, desc);
desc->next = &priv->aRD1Ring[(i + 1) % priv->opts.rx_descs1];
desc->next_desc = cpu_to_le32(curr + sizeof(struct vnt_rx_desc));
}
@@ -821,6 +824,7 @@ static void device_free_td1_ring(struct vnt_private *priv)
static int device_rx_srv(struct vnt_private *priv, unsigned int idx)
{
struct vnt_rx_desc *rd;
+ struct vnt_rd_info new_info;
int works = 0;
for (rd = priv->pCurrRD[idx];
@@ -832,13 +836,18 @@ static int device_rx_srv(struct vnt_private *priv, unsigned int idx)
if (!rd->rd_info->skb)
break;
- vnt_receive_frame(priv, rd);
-
- if (!device_alloc_rx_buf(priv, rd)) {
+ if (!device_alloc_rx_buf(priv, &new_info)) {
dev_err(&priv->pcid->dev,
"can not allocate rx buf\n");
+ rd->rd0.owner = OWNED_BY_NIC;
+ rd = rd->next;
break;
}
+
+ vnt_receive_frame(priv, rd);
+
+ memcpy(rd->rd_info, &new_info, sizeof(new_info));
+ device_init_rx_desc(priv, rd);
}
priv->pCurrRD[idx] = rd;
@@ -847,10 +856,8 @@ static int device_rx_srv(struct vnt_private *priv, unsigned int idx)
}
static bool device_alloc_rx_buf(struct vnt_private *priv,
- struct vnt_rx_desc *rd)
+ struct vnt_rd_info *rd_info)
{
- struct vnt_rd_info *rd_info = rd->rd_info;
-
rd_info->skb = dev_alloc_skb((int)priv->rx_buf_sz);
if (!rd_info->skb)
return false;
@@ -864,15 +871,17 @@ static bool device_alloc_rx_buf(struct vnt_private *priv,
rd_info->skb = NULL;
return false;
}
+ return true;
+}
+static void device_init_rx_desc(struct vnt_private *priv, struct vnt_rx_desc *rd)
+{
*((unsigned int *)&rd->rd0) = 0; /* FIX cast */
rd->rd0.res_count = cpu_to_le16(priv->rx_buf_sz);
rd->rd0.owner = OWNED_BY_NIC;
rd->rd1.req_count = cpu_to_le16(priv->rx_buf_sz);
- rd->buff_addr = cpu_to_le32(rd_info->skb_dma);
-
- return true;
+ rd->buff_addr = cpu_to_le32(rd->rd_info->skb_dma);
}
static void device_free_rx_buf(struct vnt_private *priv,
--
2.25.1