[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <1698310400-5601-4-git-send-email-quic_ekangupt@quicinc.com>
Date: Thu, 26 Oct 2023 14:23:19 +0530
From: Ekansh Gupta <quic_ekangupt@...cinc.com>
To: <srinivas.kandagatla@...aro.org>, <linux-arm-msm@...r.kernel.org>
CC: <gregkh@...uxfoundation.org>, <linux-kernel@...r.kernel.org>
Subject: [PATCH v2 3/4] misc: fastrpc: Add DSP PD notification support
The current driver design does not provide any notification about the
status of the user PD on the DSP. Users only discover that the process
has been killed on the DSP when they make their next FastRPC
invocation. Proactively notifying the status of the user PD lets users
restart the DSP PD session promptly.
Co-developed-by: Anandu Krishnan E <quic_anane@...cinc.com>
Signed-off-by: Anandu Krishnan E <quic_anane@...cinc.com>
Signed-off-by: Ekansh Gupta <quic_ekangupt@...cinc.com>
---
Changes in v2:
- Added Co-developer tag
drivers/misc/fastrpc.c | 145 +++++++++++++++++++++++++++++++++++++++++++-
include/uapi/misc/fastrpc.h | 8 +++
2 files changed, 152 insertions(+), 1 deletion(-)
diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
index 45373bf..e012ff25 100644
--- a/drivers/misc/fastrpc.c
+++ b/drivers/misc/fastrpc.c
@@ -120,6 +120,8 @@
/* CPU feature information to DSP */
#define FASTRPC_CPUINFO_DEFAULT (0)
#define FASTRPC_CPUINFO_EARLY_WAKEUP (1)
+/* Process status notifications from DSP will be sent with this unique context */
+#define FASTRPC_NOTIF_CTX_RESERVED 0xABCDABCD
#define miscdev_to_fdevice(d) container_of(d, struct fastrpc_device, miscdev)
@@ -238,6 +240,13 @@ struct fastrpc_invoke_rspv2 {
u32 version; /* version number */
};
+struct dsp_notif_rsp {
+	u64 ctx; /* response context; FASTRPC_NOTIF_CTX_RESERVED marks a notification */
+	u32 type; /* Notification type (STATUS_RESPONSE) */
+	int pid; /* user process pid, matched against fastrpc_user->tgid */
+	u32 status; /* userpd status notification (enum fastrpc_status_flags) */
+};
+
struct fastrpc_buf_overlap {
u64 start;
u64 end;
@@ -297,6 +306,27 @@ struct fastrpc_perf {
u64 tid;
};
+struct fastrpc_notif_queue {
+	/* Number of pending status notifications in the queue */
+	atomic_t notif_queue_count;
+	/* Wait queue on which the notifier thread blocks for a response */
+	wait_queue_head_t notif_wait_queue;
+	/* IRQ-safe spinlock protecting the notification queue */
+	spinlock_t nqlock;
+};
+
+struct fastrpc_internal_notif_rsp {
+	u32 domain; /* Domain of User PD */
+	u32 session; /* Session ID of User PD; NOTE(review): never filled before copy_to_user — confirm */
+	u32 status; /* Status of the process (enum fastrpc_status_flags) */
+};
+
+struct fastrpc_notif_rsp {
+	struct list_head notifn; /* link in fastrpc_user->notif_queue */
+	u32 domain; /* domain ID the status applies to */
+	enum fastrpc_status_flags status; /* reported user PD status */
+};
+
struct fastrpc_invoke_ctx {
int nscalars;
int nbufs;
@@ -376,10 +406,13 @@ struct fastrpc_user {
struct list_head pending;
struct list_head interrupted;
struct list_head mmaps;
+ struct list_head notif_queue;
struct fastrpc_channel_ctx *cctx;
struct fastrpc_session_ctx *sctx;
struct fastrpc_buf *init_mem;
+ /* Process status notification queue */
+ struct fastrpc_notif_queue proc_state_notif;
u32 profile;
/* Threads poll for specified timeout and fall back to glink wait */
@@ -2085,6 +2118,99 @@ static int fastrpc_invoke(struct fastrpc_user *fl, char __user *argp)
return err;
}
+/* Queue a PD status notification for @fl and wake any waiting reader. */
+static void fastrpc_queue_pd_status(struct fastrpc_user *fl, int domain, int status)
+{
+	struct fastrpc_notif_rsp *notif_rsp;
+	unsigned long flags;
+
+	/* GFP_ATOMIC: may be called from the rpmsg callback / under spinlocks */
+	notif_rsp = kzalloc(sizeof(*notif_rsp), GFP_ATOMIC);
+	if (!notif_rsp)
+		return;
+
+	notif_rsp->status = status;
+	notif_rsp->domain = domain;
+
+	spin_lock_irqsave(&fl->proc_state_notif.nqlock, flags);
+	list_add_tail(&notif_rsp->notifn, &fl->notif_queue);
+	atomic_inc(&fl->proc_state_notif.notif_queue_count);
+	wake_up_interruptible(&fl->proc_state_notif.notif_wait_queue);
+	spin_unlock_irqrestore(&fl->proc_state_notif.nqlock, flags);
+}
+
+/* Route a DSP status notification to the fastrpc_user whose tgid matches. */
+static void fastrpc_notif_find_process(int domain, struct fastrpc_channel_ctx *cctx,
+				       struct dsp_notif_rsp *notif)
+{
+	struct fastrpc_user *user;
+	unsigned long irq_flags = 0;
+
+	/*
+	 * Queue the status while cctx->lock is still held: once the lock
+	 * is dropped, 'user' may be freed by a concurrent file release.
+	 */
+	spin_lock_irqsave(&cctx->lock, irq_flags);
+	list_for_each_entry(user, &cctx->users, user) {
+		if (user->tgid == notif->pid) {
+			fastrpc_queue_pd_status(user, domain, notif->status);
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&cctx->lock, irq_flags);
+}
+
+/* Block until a PD status notification is queued, then pop and return it. */
+static int fastrpc_wait_on_notif_queue(
+			struct fastrpc_internal_notif_rsp *notif_rsp,
+			struct fastrpc_user *fl)
+{
+	int err = 0;
+	unsigned long flags;
+	struct fastrpc_notif_rsp *notif = NULL, *inotif, *n;
+
+read_notif_status:
+	/* Sleep until fastrpc_queue_pd_status() adds an entry (or a signal) */
+	err = wait_event_interruptible(fl->proc_state_notif.notif_wait_queue,
+		atomic_read(&fl->proc_state_notif.notif_queue_count));
+	if (err)
+		return err;
+
+	/* Pop the oldest pending notification, if any */
+	spin_lock_irqsave(&fl->proc_state_notif.nqlock, flags);
+	list_for_each_entry_safe(inotif, n, &fl->notif_queue, notifn) {
+		list_del(&inotif->notifn);
+		atomic_dec(&fl->proc_state_notif.notif_queue_count);
+		notif = inotif;
+		break;
+	}
+	spin_unlock_irqrestore(&fl->proc_state_notif.nqlock, flags);
+
+	if (notif) {
+		notif_rsp->status = notif->status;
+		notif_rsp->domain = notif->domain;
+	} else {
+		/* Woken with an empty queue: report and go back to waiting */
+		dev_err(fl->sctx->dev, "Invalid status notification response\n");
+		goto read_notif_status;
+	}
+
+	kfree(notif);
+	return err;
+}
+
+/* Wait for a PD status notification and copy it to the user buffer @param. */
+static int fastrpc_get_notif_response(
+			struct fastrpc_internal_notif_rsp *notif,
+			void *param, struct fastrpc_user *fl)
+{
+	int err;
+
+	memset(notif, 0, sizeof(*notif)); /* 'session' is never set below; avoid stack infoleak */
+	err = fastrpc_wait_on_notif_queue(notif, fl);
+	if (err)
+		return err;
+
+	if (copy_to_user((void __user *)param, notif, sizeof(*notif)))
+		return -EFAULT;
+
+	return 0;
+}
+
static int fastrpc_manage_poll_mode(struct fastrpc_user *fl, u32 enable, u32 timeout)
{
const unsigned int MAX_POLL_TIMEOUT_US = 10000;
@@ -2141,6 +2267,7 @@ static int fastrpc_multimode_invoke(struct fastrpc_user *fl, char __user *argp)
struct fastrpc_invoke_args *args = NULL;
struct fastrpc_ioctl_multimode_invoke invoke;
struct fastrpc_internal_control cp = {0};
+ struct fastrpc_internal_notif_rsp notif;
u32 nscalars;
u64 *perf_kernel;
int err, i;
@@ -2190,6 +2317,10 @@ static int fastrpc_multimode_invoke(struct fastrpc_user *fl, char __user *argp)
err = fastrpc_internal_control(fl, &cp);
break;
+ case FASTRPC_INVOKE_NOTIF:
+		err = fastrpc_get_notif_response(&notif,
+					(void *)invoke.invparam, fl);
+ break;
default:
err = -ENOTTY;
break;
@@ -2942,8 +3073,10 @@ static void fastrpc_rpmsg_remove(struct rpmsg_device *rpdev)
/* No invocations past this point */
spin_lock_irqsave(&cctx->lock, flags);
cctx->rpdev = NULL;
- list_for_each_entry(user, &cctx->users, user)
+ list_for_each_entry(user, &cctx->users, user) {
+ fastrpc_queue_pd_status(user, cctx->domain_id, FASTRPC_DSP_SSR);
fastrpc_notify_users(user);
+ }
spin_unlock_irqrestore(&cctx->lock, flags);
if (cctx->fdevice)
@@ -2994,12 +3127,22 @@ static int fastrpc_rpmsg_callback(struct rpmsg_device *rpdev, void *data,
struct fastrpc_channel_ctx *cctx = dev_get_drvdata(&rpdev->dev);
struct fastrpc_invoke_rsp *rsp = data;
struct fastrpc_invoke_rspv2 *rspv2 = NULL;
+ struct dsp_notif_rsp *notif = (struct dsp_notif_rsp *)data;
struct fastrpc_invoke_ctx *ctx;
unsigned long flags;
unsigned long ctxid;
u32 rsp_flags = 0;
u32 early_wake_time = 0;
+ if (notif->ctx == FASTRPC_NOTIF_CTX_RESERVED) {
+ if (notif->type == STATUS_RESPONSE && len >= sizeof(*notif)) {
+ fastrpc_notif_find_process(cctx->domain_id, cctx, notif);
+ return 0;
+ } else {
+ return -ENOENT;
+ }
+ }
+
if (len < sizeof(*rsp))
return -EINVAL;
diff --git a/include/uapi/misc/fastrpc.h b/include/uapi/misc/fastrpc.h
index 3dfd8e9..1f544a3 100644
--- a/include/uapi/misc/fastrpc.h
+++ b/include/uapi/misc/fastrpc.h
@@ -192,4 +192,12 @@ enum fastrpc_perfkeys {
PERF_KEY_MAX = 10,
};
+enum fastrpc_status_flags {
+	FASTRPC_USERPD_UP = 0,		/* user PD is up */
+	FASTRPC_USERPD_EXIT = 1,	/* user PD exit */
+	FASTRPC_USERPD_FORCE_KILL = 2,	/* user PD force kill */
+	FASTRPC_USERPD_EXCEPTION = 3,	/* user PD exception */
+	FASTRPC_DSP_SSR = 4,		/* DSP is down (queued on rpmsg remove) */
+};
+
#endif /* __QCOM_FASTRPC_H__ */
--
2.7.4
Powered by blists - more mailing lists