Message-ID: <aN6F7Qw7wZAYpHCB@tzungbi-laptop>
Date: Thu, 2 Oct 2025 22:02:21 +0800
From: Tzung-Bi Shih <tzungbi@...nel.org>
To: Mary Strodl <mstrodl@....rit.edu>
Cc: linux-kernel@...r.kernel.org, linus.walleij@...aro.org, brgl@...ev.pl,
	linux-gpio@...r.kernel.org
Subject: Re: [PATCH v2 1/3] gpio: mpsse: use rcu to ensure worker is torn down

On Tue, Sep 23, 2025 at 09:33:02AM -0400, Mary Strodl wrote:
> @@ -261,9 +273,8 @@ static int gpio_mpsse_direction_input(struct gpio_chip *chip,
>  
>  	guard(mutex)(&priv->io_mutex);
>  	priv->gpio_dir[bank] &= ~BIT(bank_offset);
> -	gpio_mpsse_set_bank(priv, bank);
>  
> -	return 0;
> +	return gpio_mpsse_set_bank(priv, bank);

This change looks unrelated to the purpose of the patch; consider splitting it
into a separate cleanup.

> +static void gpio_mpsse_poll(struct work_struct *my_work)
>  {
[...]
> +	/*
> +	 * We only want one worker. Workers race to acquire irq_race and tear
> +	 * down all other workers. This is a cond guard so that we don't deadlock
> +	 * trying to cancel a worker.
> +	 */
> +	scoped_cond_guard(mutex_try, ;, &priv->irq_race) {
> +		scoped_guard(rcu) {
> +			list_for_each_entry_rcu(worker, &priv->workers, list) {

I'm not sure: doesn't this need list_for_each_entry_safe() (or one of its
variants), since elements may be removed inside the loop?
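
For illustration, a rough sketch of what the safe variant could look like
(names taken from the quoted patch; note that list_for_each_entry_safe() is
not RCU-aware, so this would only be valid because the walk is already
serialized against other writers by the irq_race mutex):

	struct mpsse_worker *worker, *tmp;

	/* The lookahead pointer (tmp) lets the current entry be unlinked
	 * without breaking the iteration. */
	list_for_each_entry_safe(worker, tmp, &priv->workers, list) {
		if (worker == my_worker)
			continue;
		scoped_guard(raw_spinlock_irqsave, &priv->irq_spin)
			list_del_rcu(&worker->list);
	}
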

> +				/* Don't stop ourselves */
> +				if (worker == my_worker)
> +					continue;
> +
> +				scoped_guard(raw_spinlock_irqsave, &priv->irq_spin)
> +					list_del_rcu(&worker->list);

If RCU is in use, does it still need to acquire the spinlock?  Alternatively,
could it use the spinlock alone to protect the list, so that it doesn't need
RCU at all?
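
A minimal sketch of the spinlock-only alternative, assuming the same fields as
the quoted patch (blocking teardown still has to happen outside the raw
spinlock, so cancelled workers are collected on a local list first):

	struct mpsse_worker *worker, *tmp;
	LIST_HEAD(destructors);

	scoped_guard(raw_spinlock_irqsave, &priv->irq_spin) {
		list_for_each_entry_safe(worker, tmp, &priv->workers, list) {
			if (worker == my_worker)
				continue;
			/* Plain list_del() is enough without RCU readers. */
			list_del(&worker->list);
			atomic_set(&worker->cancelled, 1);
			list_add(&worker->destroy, &destructors);
		}
	}

	/* Outside the raw spinlock: safe to block while stopping workers. */
	list_for_each_entry(worker, &destructors, destroy)
		gpio_mpsse_stop(worker);
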

>  static void gpio_mpsse_irq_disable(struct irq_data *irqd)
>  {
> +	struct mpsse_worker *worker;
>  	struct mpsse_priv *priv = irq_data_get_irq_chip_data(irqd);
>  
>  	atomic_and(~BIT(irqd->hwirq), &priv->irq_enabled);
>  	gpiochip_disable_irq(&priv->gpio, irqd->hwirq);
> +
> +	/* Can't actually do teardown in IRQ context (blocks...) */
> +	scoped_guard(rcu)
> +		list_for_each_entry_rcu(worker, &priv->workers, list)
> +			atomic_set(&worker->cancelled, 1);
>  }
>  
>  static void gpio_mpsse_irq_enable(struct irq_data *irqd)
>  {
> +	struct mpsse_worker *worker;
>  	struct mpsse_priv *priv = irq_data_get_irq_chip_data(irqd);
>  
>  	gpiochip_enable_irq(&priv->gpio, irqd->hwirq);
>  	/* If no-one else was using the IRQ, enable it */
>  	if (!atomic_fetch_or(BIT(irqd->hwirq), &priv->irq_enabled)) {
> -		INIT_WORK(&priv->irq_work, gpio_mpsse_poll);
> -		schedule_work(&priv->irq_work);
> +		/*
> +		 * Can't be devm because it uses a non-raw spinlock (illegal in
> +		 * this context, where a raw spinlock is held by our caller)
> +		 */
> +		worker = kzalloc(sizeof(*worker), GFP_KERNEL);

I'm not sure, but it seems this function may be called in IRQ context too
(just as gpio_mpsse_irq_disable() is).  GFP_KERNEL allocations can sleep,
which isn't allowed there.
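
If this path can indeed run in atomic context, the usual fix is an atomic
allocation, e.g.:

	/* GFP_ATOMIC never sleeps, at the cost of a higher chance of
	 * failure, so the NULL check below stays essential. */
	worker = kzalloc(sizeof(*worker), GFP_ATOMIC);
	if (!worker)
		return;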

> +		if (!worker)
> +			return;
> +
> +		worker->priv = priv;
> +		INIT_LIST_HEAD(&worker->list);
> +		INIT_WORK(&worker->work, gpio_mpsse_poll);
> +		schedule_work(&worker->work);
> +
> +		scoped_guard(raw_spinlock_irqsave, &priv->irq_spin)
> +			list_add_rcu(&worker->list, &priv->workers);

Doesn't it need a synchronize_rcu()?
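
For reference, the place where synchronize_rcu() is canonically required is
between unlinking an entry and freeing it, rather than after insertion; a
generic sketch (not the patch's code):

	list_del_rcu(&worker->list);
	/* Wait for all pre-existing RCU readers to drop their references
	 * before the memory can be reused. */
	synchronize_rcu();
	kfree(worker);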

>  static void gpio_mpsse_disconnect(struct usb_interface *intf)
>  {
> +	struct mpsse_worker *worker;
>  	struct mpsse_priv *priv = usb_get_intfdata(intf);
> +	struct list_head destructors = LIST_HEAD_INIT(destructors);
> +
> +	/*
> +	 * Lock prevents double-free of worker from here and the teardown
> +	 * step at the beginning of gpio_mpsse_poll
> +	 */
> +	scoped_guard(mutex, &priv->irq_race) {
> +		scoped_guard(rcu) {
> +			list_for_each_entry_rcu(worker, &priv->workers, list) {
> +				scoped_guard(raw_spinlock_irqsave, &priv->irq_spin)
> +					list_del_rcu(&worker->list);
> +
> +				/* Give worker a chance to terminate itself */
> +				atomic_set(&worker->cancelled, 1);
> +				/* Keep track of stuff to cancel */
> +				INIT_LIST_HEAD(&worker->destroy);
> +				list_add(&worker->destroy, &destructors);
> +			}
> +		}
> +		/* Make sure list consumers are finished before we tear down */
> +		synchronize_rcu();
> +		list_for_each_entry(worker, &destructors, destroy)
> +			gpio_mpsse_stop(worker);
> +	}

This block is very similar to the one in gpio_mpsse_poll() above.  Consider
factoring it into a shared helper to avoid the duplication, e.g. something
like the sketch below.
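
(gpio_mpsse_cancel_workers() is a hypothetical name; a rough sketch built from
the two quoted blocks, under the same locking assumptions:)

static void gpio_mpsse_cancel_workers(struct mpsse_priv *priv,
				      struct mpsse_worker *skip,
				      struct list_head *destructors)
{
	struct mpsse_worker *worker;

	scoped_guard(rcu) {
		list_for_each_entry_rcu(worker, &priv->workers, list) {
			/* Callers may pass themselves as @skip. */
			if (worker == skip)
				continue;

			scoped_guard(raw_spinlock_irqsave, &priv->irq_spin)
				list_del_rcu(&worker->list);

			/* Give the worker a chance to terminate itself and
			 * remember it for the blocking teardown later. */
			atomic_set(&worker->cancelled, 1);
			INIT_LIST_HEAD(&worker->destroy);
			list_add(&worker->destroy, destructors);
		}
	}
}

Both gpio_mpsse_poll() and gpio_mpsse_disconnect() could then call it under
their respective irq_race guards and run the blocking teardown afterwards.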
