[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20251128103428.1119696-2-ekansh.gupta@oss.qualcomm.com>
Date: Fri, 28 Nov 2025 16:04:27 +0530
From: Ekansh Gupta <ekansh.gupta@....qualcomm.com>
To: srini@...nel.org, linux-arm-msm@...r.kernel.org
Cc: gregkh@...uxfoundation.org, quic_bkumar@...cinc.com,
linux-kernel@...r.kernel.org, quic_chennak@...cinc.com,
dri-devel@...ts.freedesktop.org, arnd@...db.de,
dmitry.baryshkov@....qualcomm.com
Subject: [PATCH v2 1/2] misc: fastrpc: Refactor mmap and munmap logic into helper functions
Refactor FastRPC mmap and munmap handling by introducing dedicated
helper functions for DSP-side operations. This change improves code
readability and separates DSP invocation logic from buffer allocation
and cleanup.
Signed-off-by: Ekansh Gupta <ekansh.gupta@....qualcomm.com>
---
drivers/misc/fastrpc.c | 110 +++++++++++++++++++++++++++--------------
1 file changed, 74 insertions(+), 36 deletions(-)
diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
index ee652ef01534..9bf76e224852 100644
--- a/drivers/misc/fastrpc.c
+++ b/drivers/misc/fastrpc.c
@@ -1811,24 +1811,33 @@ static int fastrpc_get_dsp_info(struct fastrpc_user *fl, char __user *argp)
return 0;
}
-static int fastrpc_req_munmap_impl(struct fastrpc_user *fl, struct fastrpc_buf *buf)
+static int fastrpc_req_munmap_dsp(struct fastrpc_user *fl, u64 raddr, u64 size)
{
struct fastrpc_invoke_args args[1] = { [0] = { 0 } };
struct fastrpc_munmap_req_msg req_msg;
- struct device *dev = fl->sctx->dev;
int err;
u32 sc;
req_msg.client_id = fl->client_id;
- req_msg.size = buf->size;
- req_msg.vaddr = buf->raddr;
+ req_msg.size = size;
+ req_msg.vaddr = raddr;
args[0].ptr = (u64) (uintptr_t) &req_msg;
args[0].length = sizeof(req_msg);
sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_MUNMAP, 1, 0);
err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc,
&args[0]);
+
+ return err;
+}
+
+static int fastrpc_req_munmap_impl(struct fastrpc_user *fl, struct fastrpc_buf *buf)
+{
+ struct device *dev = fl->sctx->dev;
+ int err;
+
+ err = fastrpc_req_munmap_dsp(fl, buf->raddr, buf->size);
if (!err) {
dev_dbg(dev, "unmmap\tpt 0x%09lx OK\n", buf->raddr);
spin_lock(&fl->lock);
@@ -1869,26 +1878,54 @@ static int fastrpc_req_munmap(struct fastrpc_user *fl, char __user *argp)
return fastrpc_req_munmap_impl(fl, buf);
}
-static int fastrpc_req_mmap(struct fastrpc_user *fl, char __user *argp)
+static int fastrpc_req_map_dsp(struct fastrpc_user *fl, u64 phys,
+ u64 size, u32 flag, u64 vaddrin,
+ u64 *raddr)
{
struct fastrpc_invoke_args args[3] = { [0 ... 2] = { 0 } };
- struct fastrpc_buf *buf = NULL;
struct fastrpc_mmap_req_msg req_msg;
struct fastrpc_mmap_rsp_msg rsp_msg;
struct fastrpc_phy_page pages;
- struct fastrpc_req_mmap req;
- struct device *dev = fl->sctx->dev;
int err;
u32 sc;
- if (copy_from_user(&req, argp, sizeof(req)))
- return -EFAULT;
+ req_msg.client_id = fl->client_id;
+ req_msg.flags = flag;
+ req_msg.vaddr = vaddrin;
+ req_msg.num = sizeof(pages);
- if (req.flags != ADSP_MMAP_ADD_PAGES && req.flags != ADSP_MMAP_REMOTE_HEAP_ADDR) {
- dev_err(dev, "flag not supported 0x%x\n", req.flags);
+ args[0].ptr = (u64)(uintptr_t)&req_msg;
+ args[0].length = sizeof(req_msg);
- return -EINVAL;
+ pages.addr = phys;
+ pages.size = size;
+
+ args[1].ptr = (u64)(uintptr_t)&pages;
+ args[1].length = sizeof(pages);
+
+ args[2].ptr = (u64)(uintptr_t)&rsp_msg;
+ args[2].length = sizeof(rsp_msg);
+ sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_MMAP, 2, 1);
+ err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc,
+ &args[0]);
+
+
+ if (err) {
+ return err;
}
+ *raddr = rsp_msg.vaddr;
+
+ return 0;
+}
+
+static int fastrpc_req_buf_alloc(struct fastrpc_user *fl,
+ struct fastrpc_req_mmap req,
+ char __user *argp)
+{
+ struct device *dev = fl->sctx->dev;
+ struct fastrpc_buf *buf = NULL;
+ u64 raddr = 0;
+ int err;
if (req.vaddrin) {
dev_err(dev, "adding user allocated pages is not supported\n");
@@ -1905,26 +1942,8 @@ static int fastrpc_req_mmap(struct fastrpc_user *fl, char __user *argp)
return err;
}
- req_msg.client_id = fl->client_id;
- req_msg.flags = req.flags;
- req_msg.vaddr = req.vaddrin;
- req_msg.num = sizeof(pages);
-
- args[0].ptr = (u64) (uintptr_t) &req_msg;
- args[0].length = sizeof(req_msg);
-
- pages.addr = buf->phys;
- pages.size = buf->size;
-
- args[1].ptr = (u64) (uintptr_t) &pages;
- args[1].length = sizeof(pages);
-
- args[2].ptr = (u64) (uintptr_t) &rsp_msg;
- args[2].length = sizeof(rsp_msg);
-
- sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_MMAP, 2, 1);
- err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc,
- &args[0]);
+ err = fastrpc_req_map_dsp(fl, buf->phys, buf->size, req.flags,
+ req.vaddrin, &raddr);
if (err) {
dev_err(dev, "mmap error (len 0x%08llx)\n", buf->size);
fastrpc_buf_free(buf);
@@ -1932,10 +1951,10 @@ static int fastrpc_req_mmap(struct fastrpc_user *fl, char __user *argp)
}
/* update the buffer to be able to deallocate the memory on the DSP */
- buf->raddr = (uintptr_t) rsp_msg.vaddr;
+ buf->raddr = (uintptr_t)raddr;
/* let the client know the address to use */
- req.vaddrout = rsp_msg.vaddr;
+ req.vaddrout = raddr;
/* Add memory to static PD pool, protection thru hypervisor */
if (req.flags == ADSP_MMAP_REMOTE_HEAP_ADDR && fl->cctx->vmcount) {
@@ -1970,6 +1989,25 @@ static int fastrpc_req_mmap(struct fastrpc_user *fl, char __user *argp)
return err;
}
+static int fastrpc_req_mmap(struct fastrpc_user *fl, char __user *argp)
+{
+ struct fastrpc_req_mmap req;
+
+ if (copy_from_user(&req, argp, sizeof(req)))
+ return -EFAULT;
+
+ if (req.flags != ADSP_MMAP_ADD_PAGES && req.flags != ADSP_MMAP_REMOTE_HEAP_ADDR) {
+ dev_err(fl->sctx->dev, "flag not supported 0x%x\n", req.flags);
+
+ return -EINVAL;
+ }
+
+ return fastrpc_req_buf_alloc(fl, req, argp);
+}
+
static int fastrpc_req_mem_unmap_impl(struct fastrpc_user *fl, struct fastrpc_mem_unmap *req)
{
struct fastrpc_invoke_args args[1] = { [0] = { 0 } };
--
2.34.1
Powered by blists - more mailing lists