Message-ID: <CAFA6WYOww8T7fmP-LtzS-EXG0iBbKEUOS5KrUU_7i+yqd_rFfQ@mail.gmail.com>
Date: Mon, 17 Aug 2020 19:53:55 +0530
From: Sumit Garg <sumit.garg@...aro.org>
To: Doug Anderson <dianders@...omium.org>
Cc: Daniel Thompson <daniel.thompson@...aro.org>,
Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
linux-serial@...r.kernel.org, kgdb-bugreport@...ts.sourceforge.net,
Jiri Slaby <jslaby@...e.com>,
Russell King - ARM Linux <linux@...linux.org.uk>,
Jason Wessel <jason.wessel@...driver.com>,
LKML <linux-kernel@...r.kernel.org>,
Linux ARM <linux-arm-kernel@...ts.infradead.org>
Subject: Re: [RFC 2/5] serial: core: Add framework to allow NMI aware serial drivers
On Mon, 17 Aug 2020 at 19:27, Doug Anderson <dianders@...omium.org> wrote:
>
> Hi,
>
> On Mon, Aug 17, 2020 at 5:27 AM Sumit Garg <sumit.garg@...aro.org> wrote:
> >
> > Thanks for your suggestion; irq_work_schedule() looked even better
> > since it avoids any extra overhead. See below:
> >
> > diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h
> > index 3082378..1eade89 100644
> > --- a/include/linux/irq_work.h
> > +++ b/include/linux/irq_work.h
> > @@ -3,6 +3,7 @@
> > #define _LINUX_IRQ_WORK_H
> >
> > #include <linux/smp_types.h>
> > +#include <linux/workqueue.h>
> >
> > /*
> > * An entry can be in one of four states:
> > @@ -24,6 +25,11 @@ struct irq_work {
> > void (*func)(struct irq_work *);
> > };
> >
> > +struct irq_work_schedule {
> > +	struct irq_work work;
> > +	struct work_struct *sched_work;
> > +};
> > +
> > static inline
> > void init_irq_work(struct irq_work *work, void (*func)(struct irq_work *))
> > {
> > @@ -39,6 +45,7 @@ void init_irq_work(struct irq_work *work, void (*func)(struct irq_work *))
> >
> > bool irq_work_queue(struct irq_work *work);
> > bool irq_work_queue_on(struct irq_work *work, int cpu);
> > +bool irq_work_schedule(struct work_struct *sched_work);
> >
> > void irq_work_tick(void);
> > void irq_work_sync(struct irq_work *work);
> > diff --git a/kernel/irq_work.c b/kernel/irq_work.c
> > index eca8396..3880316 100644
> > --- a/kernel/irq_work.c
> > +++ b/kernel/irq_work.c
> > @@ -24,6 +24,8 @@
> > static DEFINE_PER_CPU(struct llist_head, raised_list);
> > static DEFINE_PER_CPU(struct llist_head, lazy_list);
> >
> > +static struct irq_work_schedule irq_work_sched;
> > +
> > /*
> > * Claim the entry so that no one else will poke at it.
> > */
> > @@ -79,6 +81,25 @@ bool irq_work_queue(struct irq_work *work)
> > }
> > EXPORT_SYMBOL_GPL(irq_work_queue);
> >
> > +static void irq_work_schedule_fn(struct irq_work *work)
> > +{
> > +	struct irq_work_schedule *irq_work_sched =
> > +		container_of(work, struct irq_work_schedule, work);
> > +
> > +	if (irq_work_sched->sched_work)
> > +		schedule_work(irq_work_sched->sched_work);
> > +}
> > +
> > +/* Schedule work via irq work queue */
> > +bool irq_work_schedule(struct work_struct *sched_work)
> > +{
> > +	init_irq_work(&irq_work_sched.work, irq_work_schedule_fn);
> > +	irq_work_sched.sched_work = sched_work;
> > +
> > +	return irq_work_queue(&irq_work_sched.work);
> > +}
> > +EXPORT_SYMBOL_GPL(irq_work_schedule);
>
> Wait, howzat work? There's a single global variable that you stash
> the "sched_work" into with no locking? What if two people schedule
> work at the same time?
This API is intended to be invoked from NMI context only, so I think
there will be a single user at a time. And we can make that explicit
as well:
+/* Schedule work via irq work queue */
+bool irq_work_schedule(struct work_struct *sched_work)
+{
+	if (in_nmi()) {
+		init_irq_work(&irq_work_sched.work, irq_work_schedule_fn);
+		irq_work_sched.sched_work = sched_work;
+
+		return irq_work_queue(&irq_work_sched.work);
+	}
+
+	return false;
+}
+EXPORT_SYMBOL_GPL(irq_work_schedule);
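
Just to illustrate the intended usage (a rough sketch only, not part of
the patch; the my_* names below are made up), an NMI-aware serial driver
could hand its RX processing off to the workqueue like this:

/* Hypothetical caller, for illustration only */
#include <linux/irq_work.h>
#include <linux/printk.h>
#include <linux/workqueue.h>

/*
 * Runs later in process context, where sleeping locks and the tty
 * layer are safe to use.
 */
static void my_rx_work_fn(struct work_struct *work)
{
	/* Push buffered characters into the tty layer, wake kgdb, etc. */
}

static DECLARE_WORK(my_rx_work, my_rx_work_fn);

/* Called from the driver's NMI handler after stashing the RX data. */
static void my_nmi_rx_handler(void)
{
	/*
	 * in_nmi() is true here, so irq_work_schedule() accepts the
	 * request; the actual schedule_work() call happens later from
	 * irq_work context.
	 */
	if (!irq_work_schedule(&my_rx_work))
		pr_warn_ratelimited("failed to defer NMI rx work\n");
}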
-Sumit
>
> -Doug