[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <871qf3oru4.fsf@intel.com>
Date: Tue, 12 Sep 2023 17:10:59 -0700
From: Vinicius Costa Gomes <vinicius.gomes@...el.com>
To: Xabier Marquiegui <reibax@...il.com>, netdev@...r.kernel.org
Cc: richardcochran@...il.com, horms@...nel.org,
chrony-dev@...ony.tuxfamily.org, mlichvar@...hat.com, reibax@...il.com,
ntp-lists@...tcorallo.com, shuah@...nel.org, davem@...emloft.net,
rrameshbabu@...dia.com, alex.maftei@....com
Subject: Re: [PATCH net-next v2 2/3] ptp: support multiple timestamp event
readers
Xabier Marquiegui <reibax@...il.com> writes:
> Use linked lists to create one event queue per open file. This enables
> simultaneous readers for timestamp event queues.
>
> Signed-off-by: Xabier Marquiegui <reibax@...il.com>
> Suggested-by: Richard Cochran <richardcochran@...il.com>
> ---
> v2:
> - fix ptp_poll() return value
> - Style changes to conform to checkpatch strict suggestions
> - more coherent ptp_read error exit routines
> v1: https://lore.kernel.org/netdev/20230906104754.1324412-3-reibax@gmail.com/
>
> drivers/ptp/ptp_chardev.c | 100 +++++++++++++++++++++++++++++---------
> drivers/ptp/ptp_clock.c | 6 +--
> drivers/ptp/ptp_private.h | 4 +-
> drivers/ptp/ptp_sysfs.c | 4 --
> 4 files changed, 82 insertions(+), 32 deletions(-)
>
> diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c
> index 197edf1179f1..c9da0f27d204 100644
> --- a/drivers/ptp/ptp_chardev.c
> +++ b/drivers/ptp/ptp_chardev.c
> @@ -103,9 +103,39 @@ int ptp_set_pinfunc(struct ptp_clock *ptp, unsigned int pin,
>
> int ptp_open(struct posix_clock *pc, fmode_t fmode)
> {
> + struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
> + struct timestamp_event_queue *queue;
> +
> + queue = kzalloc(sizeof(*queue), GFP_KERNEL);
> + if (!queue)
> + return -EINVAL;
> + queue->reader_pid = task_pid_nr(current);
Using the pid of the task will break when using some form of file
descriptor passing, e.g. Task A opened the chardev, called the ioctl()
with some mask and then passed the fd to Task B, which is the task
that's actually going to use the fd.
Is this something that we want to support? Am I missing something?
> + list_add_tail(&queue->qlist, &ptp->tsevqs);
> +
> return 0;
> }
>
> +int ptp_release(struct posix_clock *pc)
> +{
> + struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
> + struct list_head *pos, *n;
> + struct timestamp_event_queue *element;
> + int found = -1;
> + pid_t reader_pid = task_pid_nr(current);
> +
> + list_for_each_safe(pos, n, &ptp->tsevqs) {
> + element = list_entry(pos, struct timestamp_event_queue, qlist);
> + if (element->reader_pid == reader_pid) {
> + list_del(pos);
> + kfree(element);
> + found = 0;
> + return found;
> + }
> + }
> +
> + return found;
> +}
> +
> long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
> {
> struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
> @@ -435,14 +465,24 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
> __poll_t ptp_poll(struct posix_clock *pc, struct file *fp, poll_table *wait)
> {
> struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
> + pid_t reader_pid = task_pid_nr(current);
> struct timestamp_event_queue *queue;
> + struct list_head *pos, *n;
> + bool found = false;
>
> poll_wait(fp, &ptp->tsev_wq, wait);
>
> - /* Extract only the first element in the queue list
> - * TODO: Identify the relevant queue
> - */
> - queue = list_entry(&ptp->tsevqs, struct timestamp_event_queue, qlist);
> + /* Extract only the desired element in the queue list */
> + list_for_each_safe(pos, n, &ptp->tsevqs) {
> + queue = list_entry(pos, struct timestamp_event_queue, qlist);
> + if (queue->reader_pid == reader_pid) {
> + found = true;
> + break;
> + }
> + }
> +
> + if (!found)
> + return EPOLLERR;
>
> return queue_cnt(queue) ? EPOLLIN : 0;
> }
> @@ -453,44 +493,54 @@ ssize_t ptp_read(struct posix_clock *pc,
> uint rdflags, char __user *buf, size_t cnt)
> {
> struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
> + pid_t reader_pid = task_pid_nr(current);
> struct timestamp_event_queue *queue;
> struct ptp_extts_event *event;
> + struct list_head *pos, *n;
> unsigned long flags;
> + bool found = false;
> size_t qcnt, i;
> int result;
>
> - /* Extract only the first element in the queue list
> - * TODO: Identify the relevant queue
> - */
> - queue = list_first_entry(&ptp->tsevqs, struct timestamp_event_queue,
> - qlist);
> + /* Extract only the desired element in the queue list */
> + list_for_each_safe(pos, n, &ptp->tsevqs) {
> + queue = list_entry(pos, struct timestamp_event_queue, qlist);
> + if (queue->reader_pid == reader_pid) {
> + found = true;
> + break;
> + }
> + }
>
> - if (cnt % sizeof(struct ptp_extts_event) != 0)
> - return -EINVAL;
> + if (!found) {
> + result = -EINVAL;
> + goto exit;
> + }
> +
> + if (cnt % sizeof(struct ptp_extts_event) != 0) {
> + result = -EINVAL;
> + goto exit;
> + }
>
> if (cnt > EXTTS_BUFSIZE)
> cnt = EXTTS_BUFSIZE;
>
> cnt = cnt / sizeof(struct ptp_extts_event);
>
> - if (mutex_lock_interruptible(&ptp->tsevq_mux))
> - return -ERESTARTSYS;
> -
> if (wait_event_interruptible(ptp->tsev_wq,
> ptp->defunct || queue_cnt(queue))) {
> - mutex_unlock(&ptp->tsevq_mux);
> - return -ERESTARTSYS;
> + result = -ERESTARTSYS;
> + goto exit;
> }
>
> if (ptp->defunct) {
> - mutex_unlock(&ptp->tsevq_mux);
> - return -ENODEV;
> + result = -ENODEV;
> + goto exit;
> }
>
> event = kmalloc(EXTTS_BUFSIZE, GFP_KERNEL);
> if (!event) {
> - mutex_unlock(&ptp->tsevq_mux);
> - return -ENOMEM;
> + result = -ENOMEM;
> + goto exit;
> }
>
> spin_lock_irqsave(&queue->lock, flags);
> @@ -509,12 +559,16 @@ ssize_t ptp_read(struct posix_clock *pc,
>
> cnt = cnt * sizeof(struct ptp_extts_event);
>
> - mutex_unlock(&ptp->tsevq_mux);
> -
> result = cnt;
> - if (copy_to_user(buf, event, cnt))
> + if (copy_to_user(buf, event, cnt)) {
> result = -EFAULT;
> + goto free_event;
> + }
>
> +free_event:
> kfree(event);
> +exit:
> + if (result < 0)
> + ptp_release(pc);
> return result;
> }
> diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
> index 7ac04a282ec5..d52fc23e20a8 100644
> --- a/drivers/ptp/ptp_clock.c
> +++ b/drivers/ptp/ptp_clock.c
> @@ -162,6 +162,7 @@ static struct posix_clock_operations ptp_clock_ops = {
> .clock_settime = ptp_clock_settime,
> .ioctl = ptp_ioctl,
> .open = ptp_open,
> + .release = ptp_release,
> .poll = ptp_poll,
> .read = ptp_read,
> };
> @@ -184,7 +185,6 @@ static void ptp_clock_release(struct device *dev)
>
> ptp_cleanup_pin_groups(ptp);
> kfree(ptp->vclock_index);
> - mutex_destroy(&ptp->tsevq_mux);
> mutex_destroy(&ptp->pincfg_mux);
> mutex_destroy(&ptp->n_vclocks_mux);
> ptp_clean_queue_list(ptp);
> @@ -246,10 +246,9 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
> queue = kzalloc(sizeof(*queue), GFP_KERNEL);
> if (!queue)
> goto no_memory_queue;
> + queue->reader_pid = 0;
> spin_lock_init(&queue->lock);
> list_add_tail(&queue->qlist, &ptp->tsevqs);
> - /* TODO - Transform or delete this mutex */
> - mutex_init(&ptp->tsevq_mux);
> mutex_init(&ptp->pincfg_mux);
> mutex_init(&ptp->n_vclocks_mux);
> init_waitqueue_head(&ptp->tsev_wq);
> @@ -350,7 +349,6 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
> if (ptp->kworker)
> kthread_destroy_worker(ptp->kworker);
> kworker_err:
> - mutex_destroy(&ptp->tsevq_mux);
> mutex_destroy(&ptp->pincfg_mux);
> mutex_destroy(&ptp->n_vclocks_mux);
> ptp_clean_queue_list(ptp);
> diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h
> index 314c21c39f6a..046d1482bcee 100644
> --- a/drivers/ptp/ptp_private.h
> +++ b/drivers/ptp/ptp_private.h
> @@ -27,6 +27,7 @@ struct timestamp_event_queue {
> int tail;
> spinlock_t lock;
> struct list_head qlist;
> + pid_t reader_pid;
> };
>
> struct ptp_clock {
> @@ -38,7 +39,6 @@ struct ptp_clock {
> struct pps_device *pps_source;
> long dialed_frequency; /* remembers the frequency adjustment */
> struct list_head tsevqs; /* timestamp fifo list */
> - struct mutex tsevq_mux; /* one process at a time reading the fifo */
> struct mutex pincfg_mux; /* protect concurrent info->pin_config access */
> wait_queue_head_t tsev_wq;
> int defunct; /* tells readers to go away when clock is being removed */
> @@ -124,6 +124,8 @@ long ptp_ioctl(struct posix_clock *pc,
>
> int ptp_open(struct posix_clock *pc, fmode_t fmode);
>
> +int ptp_release(struct posix_clock *pc);
> +
> ssize_t ptp_read(struct posix_clock *pc,
> uint flags, char __user *buf, size_t cnt);
>
> diff --git a/drivers/ptp/ptp_sysfs.c b/drivers/ptp/ptp_sysfs.c
> index 2675f383cd0a..512b0164ef18 100644
> --- a/drivers/ptp/ptp_sysfs.c
> +++ b/drivers/ptp/ptp_sysfs.c
> @@ -87,9 +87,6 @@ static ssize_t extts_fifo_show(struct device *dev,
>
> memset(&event, 0, sizeof(event));
>
> - if (mutex_lock_interruptible(&ptp->tsevq_mux))
> - return -ERESTARTSYS;
> -
> spin_lock_irqsave(&queue->lock, flags);
> qcnt = queue_cnt(queue);
> if (qcnt) {
> @@ -104,7 +101,6 @@ static ssize_t extts_fifo_show(struct device *dev,
> cnt = snprintf(page, PAGE_SIZE, "%u %lld %u\n",
> event.index, event.t.sec, event.t.nsec);
> out:
> - mutex_unlock(&ptp->tsevq_mux);
> return cnt;
> }
> static DEVICE_ATTR(fifo, 0444, extts_fifo_show, NULL);
> --
> 2.34.1
>
>
Cheers,
--
Vinicius
Powered by blists - more mailing lists