Message-ID: <05958f83-b703-4ce7-a526-c6d0bc3fb81e@intel.com>
Date: Thu, 25 Sep 2025 11:17:55 -0700
From: Dave Jiang <dave.jiang@...el.com>
To: Pavan Chebbi <pavan.chebbi@...adcom.com>
Cc: jgg@...pe.ca, michael.chan@...adcom.com, saeedm@...dia.com,
Jonathan.Cameron@...wei.com, davem@...emloft.net, corbet@....net,
edumazet@...gle.com, gospo@...adcom.com, kuba@...nel.org,
netdev@...r.kernel.org, pabeni@...hat.com, andrew+netdev@...n.ch,
selvin.xavier@...adcom.com, leon@...nel.org,
kalesh-anakkur.purayil@...adcom.com
Subject: Re: [PATCH net-next v2 5/6] bnxt_fwctl: Add bnxt fwctl device
On 9/25/25 8:46 AM, Dave Jiang wrote:
>
>
> On 9/24/25 9:31 PM, Pavan Chebbi wrote:
>> On Thu, Sep 25, 2025 at 4:02 AM Dave Jiang <dave.jiang@...el.com> wrote:
>>>
>>
>>>> +static void *bnxtctl_fw_rpc(struct fwctl_uctx *uctx,
>>>> + enum fwctl_rpc_scope scope,
>>>> + void *in, size_t in_len, size_t *out_len)
>>>> +{
>>>> + struct bnxtctl_dev *bnxtctl =
>>>> + container_of(uctx->fwctl, struct bnxtctl_dev, fwctl);
>>>> + struct bnxt_aux_priv *bnxt_aux_priv = bnxtctl->aux_priv;
>>>> + struct fwctl_dma_info_bnxt *dma_buf = NULL;
>>>> + struct device *dev = &uctx->fwctl->dev;
>>>> + struct fwctl_rpc_bnxt *msg = in;
>>>> + struct bnxt_fw_msg rpc_in;
>>>> + int i, rc, err = 0;
>>>> + int dma_buf_size;
>>>> +
>>>> + rpc_in.msg = kzalloc(msg->req_len, GFP_KERNEL);
>>>
>>> I think if you use __free(kfree) for all the allocations in the function, you can be rid of the gotos.
>>>
>> Thanks Dave for the review. Would you be fine if I defer the scope-based
>> cleanup until later? I need some time to understand the mechanism better
>> and to define the macros correctly, as some of the pointers holding the
>> memory are members of a stack variable. I will fix the goto/free issues
>> you highlighted in the next revision. I hope that is OK?
>
> Sure, that is fine. The way things are done in this function makes it a bit tricky to do the cleanup properly via scope-based cleanup. I might play with it a bit and see if I can come up with something. It looks like it needs a bit of refactoring to split some things out. Probably not a bad thing in the long run.
>
Here's a potential template for doing it with scope-based cleanup. It applies on top of the current patch. I have only compile tested it, so there may well be errors in the conversion. Feel free to use it.
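
For reference, the <linux/cleanup.h> helpers the template below leans on work roughly as in this minimal sketch (the example_buf type and its helpers are made up purely for illustration): DEFINE_FREE() ties a destructor to a name, __free(name) arms that destructor on a local variable so it runs on every exit from the scope, and no_free_ptr() disarms it when ownership is handed off.

#include <linux/cleanup.h>
#include <linux/err.h>
#include <linux/slab.h>

/* Hypothetical buffer type, only here to illustrate the pattern. */
struct example_buf {
        void *data;
        size_t len;
};

static void example_buf_free(struct example_buf *buf)
{
        kfree(buf->data);
        kfree(buf);
}

/* Guard against NULL and ERR_PTR so early error returns don't run the
 * destructor on a bogus pointer. */
DEFINE_FREE(example_buf, struct example_buf *,
            if (!IS_ERR_OR_NULL(_T)) example_buf_free(_T))

static struct example_buf *example_buf_alloc(size_t len)
{
        /* Freed automatically on every return path below ... */
        struct example_buf *buf __free(example_buf) =
                kzalloc(sizeof(*buf), GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        buf->data = kzalloc(len, GFP_KERNEL);
        if (!buf->data)
                return ERR_PTR(-ENOMEM);    /* buf is freed by __free() */
        buf->len = len;

        /* ... except here, where ownership moves to the caller. */
        return no_free_ptr(buf);
}

The template applies the same idea to the DMA bookkeeping: the setup helpers build their state under __free() protection and only no_free_ptr() it to the caller once everything has succeeded.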
---
diff --git a/drivers/fwctl/bnxt/main.c b/drivers/fwctl/bnxt/main.c
index 1bec4567e35c..714106fd1033 100644
--- a/drivers/fwctl/bnxt/main.c
+++ b/drivers/fwctl/bnxt/main.c
@@ -22,8 +22,6 @@ struct bnxtctl_uctx {
struct bnxtctl_dev {
struct fwctl_device fwctl;
struct bnxt_aux_priv *aux_priv;
- void *dma_virt_addr[MAX_NUM_DMA_INDICATIONS];
- dma_addr_t dma_addr[MAX_NUM_DMA_INDICATIONS];
};
DEFINE_FREE(bnxtctl, struct bnxtctl_dev *, if (_T) fwctl_put(&_T->fwctl))
@@ -76,61 +74,133 @@ static bool bnxtctl_validate_rpc(struct bnxt_en_dev *edev,
return false;
}
-static int bnxt_fw_setup_input_dma(struct bnxtctl_dev *bnxt_dev,
- struct device *dev,
- int num_dma,
- struct fwctl_dma_info_bnxt *msg,
- struct bnxt_fw_msg *fw_msg)
+struct bnxtctl_dma {
+ struct device *dev;
+ int num_dma;
+ void *dma_virt_addr[MAX_NUM_DMA_INDICATIONS];
+ dma_addr_t dma_addr[MAX_NUM_DMA_INDICATIONS];
+ size_t dma_len[MAX_NUM_DMA_INDICATIONS];
+};
+
+struct dma_context {
+ struct bnxtctl_dma *dma;
+ struct fwctl_dma_info_bnxt *dma_info;
+};
+
+static void free_dma(struct bnxtctl_dma *dma)
+{
+ int i;
+
+ for (i = 0; i < dma->num_dma; i++) {
+ if (dma->dma_virt_addr[i])
+ dma_free_coherent(dma->dev, dma->dma_len[i],
+ dma->dma_virt_addr[i], dma->dma_addr[i]);
+ }
+ kfree(dma);
+}
+DEFINE_FREE(free_dma, struct bnxtctl_dma *, if (!IS_ERR_OR_NULL(_T)) free_dma(_T))
+
+static struct bnxtctl_dma *
+allocate_and_setup_dma_bufs(struct device *dev,
+ struct fwctl_dma_info_bnxt *dma_info,
+ int num_dma,
+ struct bnxt_fw_msg *fw_msg)
{
- u8 i, num_allocated = 0;
void *dma_ptr;
- int rc = 0;
+ int i;
+ struct bnxtctl_dma *dma __free(free_dma) =
+ kzalloc(sizeof(*dma), GFP_KERNEL);
+ if (!dma)
+ return ERR_PTR(-ENOMEM);
+
+ dma->dev = dev->parent;
for (i = 0; i < num_dma; i++) {
- if (msg->len == 0 || msg->len > MAX_DMA_MEM_SIZE) {
- rc = -EINVAL;
- goto err;
- }
- bnxt_dev->dma_virt_addr[i] = dma_alloc_coherent(dev->parent,
- msg->len,
- &bnxt_dev->dma_addr[i],
- GFP_KERNEL);
- if (!bnxt_dev->dma_virt_addr[i]) {
- rc = -ENOMEM;
- goto err;
- }
- num_allocated++;
- if (!(msg->read_from_device)) {
- if (copy_from_user(bnxt_dev->dma_virt_addr[i],
- u64_to_user_ptr(msg->data),
- msg->len)) {
- rc = -EFAULT;
- goto err;
- }
- }
- dma_ptr = fw_msg->msg + msg->offset;
+ __le64 *dmap;
- if ((PTR_ALIGN(dma_ptr, 8) == dma_ptr) &&
- msg->offset < fw_msg->msg_len) {
- __le64 *dmap = dma_ptr;
+ if (dma_info->len == 0 || dma_info->len > MAX_DMA_MEM_SIZE)
+ return ERR_PTR(-EINVAL);
- *dmap = cpu_to_le64(bnxt_dev->dma_addr[i]);
- } else {
- rc = -EINVAL;
- goto err;
+ dma->dma_virt_addr[i] =
+ dma_alloc_coherent(dma->dev, dma_info->len,
+ &dma->dma_addr[i], GFP_KERNEL);
+ if (!dma->dma_virt_addr[i])
+ return ERR_PTR(-ENOMEM);
+
+ dma->dma_len[i] = dma_info->len;
+ dma->num_dma++;
+ if (!(dma_info->read_from_device)) {
+ if (copy_from_user(dma->dma_virt_addr[i],
+ u64_to_user_ptr(dma_info->data),
+ dma_info->len))
+ return ERR_PTR(-EFAULT);
}
- msg += 1;
+ dma_ptr = fw_msg->msg + dma_info->offset;
+
+ if (PTR_ALIGN(dma_ptr, 8) != dma_ptr ||
+ dma_info->offset >= fw_msg->msg_len)
+ return ERR_PTR(-EINVAL);
+
+ dmap = dma_ptr;
+ *dmap = cpu_to_le64(dma->dma_addr[i]);
+ dma_info += 1;
+ }
+
+ return no_free_ptr(dma);
+}
+
+static void free_dma_context(struct dma_context *dma_ctx)
+{
+ if (dma_ctx->dma)
+ free_dma(dma_ctx->dma);
+ if (dma_ctx->dma_info)
+ kfree(dma_ctx->dma_info);
+ kfree(dma_ctx);
+}
+DEFINE_FREE(free_dma_ctx, struct dma_context *, if (!IS_ERR_OR_NULL(_T)) free_dma_context(_T))
+
+static struct dma_context *
+allocate_and_setup_dma_context(struct device *dev,
+ struct fwctl_rpc_bnxt *rpc_msg,
+ struct bnxt_fw_msg *fw_msg)
+{
+ int num_dma = rpc_msg->num_dma;
+ int dma_buf_size;
+
+ if (!num_dma)
+ return NULL;
+
+ struct dma_context *dma_ctx __free(free_dma_ctx) =
+ kzalloc(sizeof(*dma_ctx), GFP_KERNEL);
+ if (!dma_ctx)
+ return ERR_PTR(-ENOMEM);
+
+ if (num_dma > MAX_NUM_DMA_INDICATIONS) {
+ dev_err(dev, "DMA buffers exceed the number supported\n");
+ return ERR_PTR(-EINVAL);
}
- return rc;
-err:
- for (i = 0; i < num_allocated; i++)
- dma_free_coherent(dev->parent,
- msg->len,
- bnxt_dev->dma_virt_addr[i],
- bnxt_dev->dma_addr[i]);
+ dma_buf_size = num_dma * sizeof(struct fwctl_dma_info_bnxt);
+ struct fwctl_dma_info_bnxt *dma_info __free(kfree)
+ = kzalloc(dma_buf_size, GFP_KERNEL);
+ if (!dma_info) {
+ dev_err(dev, "Failed to allocate dma buffers\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ if (copy_from_user(dma_info, u64_to_user_ptr(rpc_msg->payload),
+ dma_buf_size)) {
+ dev_dbg(dev, "Failed to copy payload from user\n");
+ return ERR_PTR(-EFAULT);
+ }
+
+ struct bnxtctl_dma *dma __free(free_dma) =
+ allocate_and_setup_dma_bufs(dev, dma_info, num_dma, fw_msg);
+ if (IS_ERR(dma))
+ return ERR_CAST(dma);
+
+ dma_ctx->dma_info = no_free_ptr(dma_info);
+ dma_ctx->dma = no_free_ptr(dma);
- return rc;
+ return no_free_ptr(dma_ctx);
}
static void *bnxtctl_fw_rpc(struct fwctl_uctx *uctx,
@@ -140,34 +210,28 @@ static void *bnxtctl_fw_rpc(struct fwctl_uctx *uctx,
struct bnxtctl_dev *bnxtctl =
container_of(uctx->fwctl, struct bnxtctl_dev, fwctl);
struct bnxt_aux_priv *bnxt_aux_priv = bnxtctl->aux_priv;
- struct fwctl_dma_info_bnxt *dma_buf = NULL;
struct device *dev = &uctx->fwctl->dev;
struct fwctl_rpc_bnxt *msg = in;
struct bnxt_fw_msg rpc_in;
- int i, rc, err = 0;
- int dma_buf_size;
+ int i, rc;
+
+ void *rpc_in_msg __free(kfree) = kzalloc(msg->req_len, GFP_KERNEL);
+ if (!rpc_in_msg)
+ return ERR_PTR(-ENOMEM);
+ rpc_in.msg = rpc_in_msg;
- rpc_in.msg = kzalloc(msg->req_len, GFP_KERNEL);
- if (!rpc_in.msg) {
- err = -ENOMEM;
- goto err_out;
- }
if (copy_from_user(rpc_in.msg, u64_to_user_ptr(msg->req),
msg->req_len)) {
dev_dbg(dev, "Failed to copy in_payload from user\n");
- err = -EFAULT;
- goto err_out;
+ return ERR_PTR(-EFAULT);
}
if (!bnxtctl_validate_rpc(bnxt_aux_priv->edev, &rpc_in))
return ERR_PTR(-EPERM);
rpc_in.msg_len = msg->req_len;
- rpc_in.resp = kzalloc(*out_len, GFP_KERNEL);
- if (!rpc_in.resp) {
- err = -ENOMEM;
- goto err_out;
- }
+ void *rpc_in_resp __free(kfree) = kzalloc(*out_len, GFP_KERNEL);
+ if (!rpc_in_resp)
+ return ERR_PTR(-ENOMEM);
rpc_in.resp_max_len = *out_len;
if (!msg->timeout)
@@ -175,64 +239,37 @@ static void *bnxtctl_fw_rpc(struct fwctl_uctx *uctx,
else
rpc_in.timeout = msg->timeout;
- if (msg->num_dma) {
- if (msg->num_dma > MAX_NUM_DMA_INDICATIONS) {
- dev_err(dev, "DMA buffers exceed the number supported\n");
- err = -EINVAL;
- goto err_out;
- }
- dma_buf_size = msg->num_dma * sizeof(*dma_buf);
- dma_buf = kzalloc(dma_buf_size, GFP_KERNEL);
- if (!dma_buf) {
- dev_err(dev, "Failed to allocate dma buffers\n");
- err = -ENOMEM;
- goto err_out;
- }
-
- if (copy_from_user(dma_buf, u64_to_user_ptr(msg->payload),
- dma_buf_size)) {
- dev_dbg(dev, "Failed to copy payload from user\n");
- err = -EFAULT;
- goto err_out;
- }
-
- rc = bnxt_fw_setup_input_dma(bnxtctl, dev, msg->num_dma,
- dma_buf, &rpc_in);
- if (rc) {
- err = -EIO;
- goto err_out;
- }
- }
+ struct dma_context *dma_ctx __free(free_dma_ctx) =
+ allocate_and_setup_dma_context(dev, msg, &rpc_in);
+ if (IS_ERR(dma_ctx))
+ return ERR_CAST(dma_ctx);
+ rpc_in.resp = rpc_in_resp;
rc = bnxt_send_msg(bnxt_aux_priv->edev, &rpc_in);
- if (rc) {
- err = -EIO;
- goto err_out;
- }
+ if (rc)
+ return ERR_PTR(rc);
+
+ if (!dma_ctx)
+ return no_free_ptr(rpc_in_resp);
for (i = 0; i < msg->num_dma; i++) {
- if (dma_buf[i].read_from_device) {
- if (copy_to_user(u64_to_user_ptr(dma_buf[i].data),
- bnxtctl->dma_virt_addr[i],
- dma_buf[i].len)) {
- dev_dbg(dev, "Failed to copy resp to user\n");
- err = -EFAULT;
- }
+ struct fwctl_dma_info_bnxt *dma_info =
+ &dma_ctx->dma_info[i];
+ struct bnxtctl_dma *dma = dma_ctx->dma;
+
+ if (!dma_info->read_from_device)
+ continue;
+
+ if (copy_to_user(u64_to_user_ptr(dma_info->data),
+ dma->dma_virt_addr[i],
+ dma_info->len)) {
+ dev_dbg(dev, "Failed to copy resp to user\n");
+ return ERR_PTR(-EFAULT);
}
}
- for (i = 0; i < msg->num_dma; i++)
- dma_free_coherent(dev->parent, dma_buf[i].len,
- bnxtctl->dma_virt_addr[i],
- bnxtctl->dma_addr[i]);
-err_out:
- kfree(dma_buf);
- kfree(rpc_in.msg);
-
- if (err)
- return ERR_PTR(err);
-
- return rpc_in.resp;
+ return no_free_ptr(rpc_in_resp);
}
static const struct fwctl_ops bnxtctl_ops = {