Message-ID: <2024041111-handsaw-scruffy-27f3@gregkh>
Date: Thu, 11 Apr 2024 15:34:23 +0200
From: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
To: Elizabeth Figura <zfigura@...eweavers.com>
Cc: Arnd Bergmann <arnd@...db.de>, Jonathan Corbet <corbet@....net>,
Shuah Khan <shuah@...nel.org>, linux-kernel@...r.kernel.org,
linux-api@...r.kernel.org, wine-devel@...ehq.org,
André Almeida <andrealmeid@...lia.com>,
Wolfram Sang <wsa@...nel.org>,
Arkadiusz Hiler <ahiler@...eweavers.com>,
Peter Zijlstra <peterz@...radead.org>,
Andy Lutomirski <luto@...nel.org>, linux-doc@...r.kernel.org,
linux-kselftest@...r.kernel.org,
Randy Dunlap <rdunlap@...radead.org>
Subject: Re: [PATCH v3 04/30] ntsync: Introduce NTSYNC_IOC_WAIT_ANY.
On Thu, Mar 28, 2024 at 07:05:55PM -0500, Elizabeth Figura wrote:
> This corresponds to part of the functionality of the NT syscall
> NtWaitForMultipleObjects(). Specifically, it implements the behaviour where
> the third argument (wait_any) is TRUE, and it does not handle alertable waits.
> Those features have been split out into separate patches to ease review.
>
> NTSYNC_IOC_WAIT_ANY is a vectored wait function similar to poll(). Unlike
> poll(), it "consumes" objects when they are signaled. For semaphores, this means
> decrementing the internal counter by one. At most one object can be consumed by
> this function.
>
> Up to 64 objects can be waited on at once. As soon as one is signaled, the
> object with the lowest index is consumed, and that index is returned via the
> "index" field.
So it's kind of like our internal locks already? Or futex?
>
> A timeout is supported. The timeout is passed as a u64 nanosecond value, which
> represents absolute time measured against either the MONOTONIC or REALTIME clock
> (controlled by the flags argument). If U64_MAX is passed, the ioctl waits
> indefinitely.
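To make sure I follow the uapi here, a minimal userspace sketch of what I
think a call looks like (my own reconstruction, not from the patch; the
objs/count field names are assumptions based on the rest of the series, so
correct me if the header says otherwise):

/* Hypothetical caller: wait up to one second on two ntsync objects. */
#include <stdint.h>
#include <time.h>
#include <sys/ioctl.h>
#include <linux/ntsync.h>

static int wait_any_one_sec(int dev_fd, int obj_fds[2])
{
	struct ntsync_wait_args args = {0};
	struct timespec now;

	clock_gettime(CLOCK_MONOTONIC, &now);

	/* Absolute MONOTONIC deadline in nanoseconds, one second from now. */
	args.timeout = (uint64_t)now.tv_sec * 1000000000ull +
		       now.tv_nsec + 1000000000ull;
	args.objs = (uintptr_t)obj_fds;	/* assumed: array of object fds */
	args.count = 2;			/* assumed field name */
	args.flags = 0;			/* 0 = MONOTONIC, or NTSYNC_WAIT_REALTIME */

	if (ioctl(dev_fd, NTSYNC_IOC_WAIT_ANY, &args) < 0)
		return -1;		/* timeout, signal, or other error */

	return args.index;		/* lowest signaled index, now consumed */
}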
>
> This ioctl validates that all objects belong to the relevant device. This is not
> necessary for any technical reason related to NTSYNC_IOC_WAIT_ANY, but will be
> necessary for NTSYNC_IOC_WAIT_ALL introduced in the following patch.
>
> Two u32s of padding are left in the ntsync_wait_args structure; one will be used
> by a patch later in the series (which is split out to ease review).
>
> Signed-off-by: Elizabeth Figura <zfigura@...eweavers.com>
> ---
> drivers/misc/ntsync.c | 250 ++++++++++++++++++++++++++++++++++++
> include/uapi/linux/ntsync.h | 16 +++
> 2 files changed, 266 insertions(+)
>
> diff --git a/drivers/misc/ntsync.c b/drivers/misc/ntsync.c
> index 3c2f743c58b0..c6f84a5fc8c0 100644
> --- a/drivers/misc/ntsync.c
> +++ b/drivers/misc/ntsync.c
> @@ -6,11 +6,16 @@
> */
>
> #include <linux/anon_inodes.h>
> +#include <linux/atomic.h>
> #include <linux/file.h>
> #include <linux/fs.h>
> +#include <linux/hrtimer.h>
> +#include <linux/ktime.h>
> #include <linux/miscdevice.h>
> #include <linux/module.h>
> #include <linux/overflow.h>
> +#include <linux/sched.h>
> +#include <linux/sched/signal.h>
> #include <linux/slab.h>
> #include <linux/spinlock.h>
> #include <uapi/linux/ntsync.h>
> @@ -30,6 +35,8 @@ enum ntsync_type {
> *
> * Both rely on struct file for reference counting. Individual
> * ntsync_obj objects take a reference to the device when created.
> + * Wait operations take a reference to each object being waited on for
> + * the duration of the wait.
> */
>
> struct ntsync_obj {
> @@ -47,12 +54,56 @@ struct ntsync_obj {
> __u32 max;
> } sem;
> } u;
> +
> + struct list_head any_waiters;
> +};
> +
> +struct ntsync_q_entry {
> + struct list_head node;
> + struct ntsync_q *q;
> + struct ntsync_obj *obj;
> + __u32 index;
> +};
> +
> +struct ntsync_q {
> + struct task_struct *task;
> + __u32 owner;
> +
> + /*
> + * Protected via atomic_try_cmpxchg(). Only the thread that wins the
> + * compare-and-swap may actually change object states and wake this
> + * task.
> + */
> + atomic_t signaled;
This feels odd; why are you duplicating normal lock functionality here?
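If I'm reading it correctly, "signaled" is not really a lock at all but a
one-shot slot recording which object index won the race, with -1 meaning
"nothing yet".  Roughly this, as a toy C11 paraphrase (not kernel code):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int signaled = -1;

/* A signaler calls this; only the caller that wins the compare-and-swap
 * may consume the object and wake the waiting task. */
static bool try_claim(int index)
{
	int expected = -1;

	return atomic_compare_exchange_strong(&signaled, &expected, index);
}

If that is the intent, the existing comment could spell it out in those
terms.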
> +
> + __u32 count;
> + struct ntsync_q_entry entries[];
> };
>
> struct ntsync_device {
> struct file *file;
> };
>
> +static void try_wake_any_sem(struct ntsync_obj *sem)
> +{
> + struct ntsync_q_entry *entry;
> +
> + lockdep_assert_held(&sem->lock);
> +
> + list_for_each_entry(entry, &sem->any_waiters, node) {
> + struct ntsync_q *q = entry->q;
> + int signaled = -1;
> +
> + if (!sem->u.sem.count)
> + break;
> +
> + if (atomic_try_cmpxchg(&q->signaled, &signaled, entry->index)) {
> + sem->u.sem.count--;
> + wake_up_process(q->task);
> + }
You are waking up _all_ "locks" here with the atomic_try_cmpxchg() call,
right? Not just the "first".
Or am I confused?
> + }
> +}
> +
> /*
> * Actually change the semaphore state, returning -EOVERFLOW if it is made
> * invalid.
> @@ -88,6 +139,8 @@ static int ntsync_sem_post(struct ntsync_obj *sem, void __user *argp)
>
> prev_count = sem->u.sem.count;
> ret = post_sem_state(sem, args);
> + if (!ret)
> + try_wake_any_sem(sem);
>
> spin_unlock(&sem->lock);
>
> @@ -141,6 +194,7 @@ static struct ntsync_obj *ntsync_alloc_obj(struct ntsync_device *dev,
> obj->dev = dev;
> get_file(dev->file);
> spin_lock_init(&obj->lock);
> + INIT_LIST_HEAD(&obj->any_waiters);
>
> return obj;
> }
> @@ -191,6 +245,200 @@ static int ntsync_create_sem(struct ntsync_device *dev, void __user *argp)
> return put_user(fd, &user_args->sem);
> }
>
> +static struct ntsync_obj *get_obj(struct ntsync_device *dev, int fd)
> +{
> + struct file *file = fget(fd);
> + struct ntsync_obj *obj;
> +
> + if (!file)
> + return NULL;
> +
> + if (file->f_op != &ntsync_obj_fops) {
> + fput(file);
> + return NULL;
> + }
> +
> + obj = file->private_data;
> + if (obj->dev != dev) {
> + fput(file);
> + return NULL;
> + }
> +
> + return obj;
> +}
> +
> +static void put_obj(struct ntsync_obj *obj)
> +{
> + fput(obj->file);
> +}
> +
> +static int ntsync_schedule(const struct ntsync_q *q, const struct ntsync_wait_args *args)
> +{
> + ktime_t timeout = ns_to_ktime(args->timeout);
> + clockid_t clock = CLOCK_MONOTONIC;
> + ktime_t *timeout_ptr;
> + int ret = 0;
> +
> + timeout_ptr = (args->timeout == U64_MAX ? NULL : &timeout);
> +
> + if (args->flags & NTSYNC_WAIT_REALTIME)
> + clock = CLOCK_REALTIME;
> +
> + do {
> + if (signal_pending(current)) {
> + ret = -ERESTARTSYS;
> + break;
> + }
> +
> + set_current_state(TASK_INTERRUPTIBLE);
> + if (atomic_read(&q->signaled) != -1) {
> + ret = 0;
> + break;
What happens if the value changes right after you read it?
Rolling your own lock is tricky, and needs review from the locking
maintainers. It also probably needs some more documentation as to what is
happening and why our normal types of locks can't be used here.
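At the very least I would expect a comment here spelling out the ordering
argument this relies on, something along these lines (an illustrative
sketch of the classic pattern, not a claim that the patch gets it right):

/*
 * Sleeper: set the task state *before* re-checking the condition, so a
 * waker that updates "signaled" and then calls wake_up_process() cannot
 * be missed; at worst schedule() returns immediately.
 */
for (;;) {
	set_current_state(TASK_INTERRUPTIBLE);
	if (atomic_read(&q->signaled) != -1)
		break;
	if (signal_pending(current))
		break;
	schedule();
}
__set_current_state(TASK_RUNNING);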
thanks,
greg k-h