[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20251230110225.3655707-4-ekansh.gupta@oss.qualcomm.com>
Date: Tue, 30 Dec 2025 16:32:25 +0530
From: Ekansh Gupta <ekansh.gupta@....qualcomm.com>
To: srini@...nel.org, linux-arm-msm@...r.kernel.org
Cc: gregkh@...uxfoundation.org, quic_bkumar@...cinc.com,
linux-kernel@...r.kernel.org, quic_chennak@...cinc.com,
dri-devel@...ts.freedesktop.org, arnd@...db.de,
dmitry.baryshkov@....qualcomm.com
Subject: [PATCH v3 3/3] misc: fastrpc: Support mapping userspace-allocated buffers
Currently, the FastRPC driver only supports mapping buffers allocated by the
kernel. This limits flexibility for applications that allocate memory
in userspace using rpcmem or DMABUF and need to share it with the DSP.
Add support for mapping and unmapping userspace-allocated buffers to
the DSP through the SMMU. This includes handling map requests for
rpcmem- and DMABUF-backed memory and providing the corresponding unmap
functionality.
Signed-off-by: Ekansh Gupta <ekansh.gupta@....qualcomm.com>
---
drivers/misc/fastrpc.c | 97 +++++++++++++++++++++++++++++++++++++-----
1 file changed, 86 insertions(+), 11 deletions(-)
diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
index 24064efe5dd8..b677e485bade 100644
--- a/drivers/misc/fastrpc.c
+++ b/drivers/misc/fastrpc.c
@@ -1854,8 +1854,10 @@ static int fastrpc_req_munmap_impl(struct fastrpc_user *fl, struct fastrpc_buf *
static int fastrpc_req_munmap(struct fastrpc_user *fl, char __user *argp)
{
struct fastrpc_buf *buf = NULL, *iter, *b;
+ struct fastrpc_map *map = NULL, *iterm, *m;
struct fastrpc_req_munmap req;
struct device *dev = fl->sctx->dev;
+ int err;
if (copy_from_user(&req, argp, sizeof(req)))
return -EFAULT;
@@ -1869,13 +1871,42 @@ static int fastrpc_req_munmap(struct fastrpc_user *fl, char __user *argp)
}
spin_unlock(&fl->lock);
- if (!buf) {
- dev_err(dev, "buffer not found: addr=%p [len=0x%08llx]\n",
+ if (buf) {
+ err = fastrpc_req_munmap_impl(fl, buf);
+ if (err) {
+ spin_lock(&fl->lock);
+ list_add_tail(&buf->node, &fl->mmaps);
+ spin_unlock(&fl->lock);
+ }
+ return err;
+ }
+
+ spin_lock(&fl->lock);
+ list_for_each_entry_safe(iterm, m, &fl->maps, node) {
+ if (iterm->raddr == req.vaddrout) {
+ map = iterm;
+ list_del(&iterm->node);
+ break;
+ }
+ }
+ spin_unlock(&fl->lock);
+ if (!map) {
+ dev_dbg(dev, "buffer/map not found addr=%p len=0x%08llx\n",
(void *)(unsigned long)req.vaddrout, req.size);
return -EINVAL;
}
- return fastrpc_req_munmap_impl(fl, buf);
+ err = fastrpc_req_munmap_dsp(fl, map->raddr, map->size);
+ if (err) {
+ dev_dbg(dev, "unmap error: fd=%d, raddr=%p\n",
+ map->fd, (void *)(unsigned long)map->raddr);
+ spin_lock(&fl->lock);
+ list_add_tail(&map->node, &fl->maps);
+ spin_unlock(&fl->lock);
+ } else {
+ fastrpc_map_put(map);
+ }
+ return err;
}
static int fastrpc_req_map_dsp(struct fastrpc_user *fl, u64 phys,
@@ -1989,25 +2020,69 @@ static int fastrpc_req_buf_alloc(struct fastrpc_user *fl,
return err;
}
-static int fastrpc_req_mmap(struct fastrpc_user *fl, char __user *argp)
+static int fastrpc_req_map_create(struct fastrpc_user *fl,
+ struct fastrpc_req_mmap req,
+ char __user *argp)
{
- struct fastrpc_req_mmap req;
+ struct fastrpc_map *map = NULL;
+ struct device *dev = fl->sctx->dev;
+ u64 raddr = 0;
int err;
- if (copy_from_user(&req, argp, sizeof(req)))
- return -EFAULT;
+ err = fastrpc_map_create(fl, req.fd, req.size, 0, &map);
+ if (err) {
+ dev_err(dev, "failed to map buffer, fd = %d\n", req.fd);
+ return err;
+ }
+
+ err = fastrpc_req_map_dsp(fl, map->phys, map->size, req.flags,
+ req.vaddrin, &raddr);
+ if (err)
+ goto err_invoke;
- if (req.flags != ADSP_MMAP_ADD_PAGES && req.flags != ADSP_MMAP_REMOTE_HEAP_ADDR) {
- dev_err(fl->sctx->dev, "flag not supported 0x%x\n", req.flags);
+ /* update the buffer to be able to deallocate the memory on the DSP */
+ map->raddr = (u64)raddr;
- return -EINVAL;
+ /* let the client know the address to use */
+ req.vaddrout = raddr;
+ dev_dbg(dev, "mmap OK: raddr=%p [len=0x%08llx]\n",
+ (void *)(unsigned long)map->raddr, map->size);
+
+ if (copy_to_user(argp, &req, sizeof(req))) {
+ err = -EFAULT;
+ goto err_copy;
}
- err = fastrpc_req_buf_alloc(fl, req, argp);
+ return 0;
+err_copy:
+ fastrpc_req_munmap_dsp(fl, map->raddr, map->size);
+err_invoke:
+ fastrpc_map_put(map);
return err;
}
+static int fastrpc_req_mmap(struct fastrpc_user *fl, char __user *argp)
+{
+ struct fastrpc_req_mmap req;
+ int err;
+
+ if (copy_from_user(&req, argp, sizeof(req)))
+ return -EFAULT;
+
+ if ((req.flags == ADSP_MMAP_ADD_PAGES ||
+ req.flags == ADSP_MMAP_REMOTE_HEAP_ADDR)) {
+ err = fastrpc_req_buf_alloc(fl, req, argp);
+ if (err)
+ return err;
+ } else {
+ err = fastrpc_req_map_create(fl, req, argp);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
static int fastrpc_req_mem_unmap_impl(struct fastrpc_user *fl, struct fastrpc_mem_unmap *req)
{
struct fastrpc_invoke_args args[1] = { [0] = { 0 } };
--
2.34.1
Powered by blists - more mailing lists