[<prev] [next>] [<thread-prev] [day] [month] [year] [list]
Message-ID: <20250724121046.65f640db@jic23-huawei>
Date: Thu, 24 Jul 2025 12:10:46 +0100
From: Jonathan Cameron <jic23@...nel.org>
To: David Lechner <dlechner@...libre.com>
Cc: Nuno Sá <nuno.sa@...log.com>, Andy Shevchenko
<andy@...nel.org>, linux-iio@...r.kernel.org, linux-kernel@...r.kernel.org
Subject: Re: [PATCH v2] iio: proximity: sx9500: use stack allocated struct
for scan data
On Tue, 22 Jul 2025 14:35:02 -0500
David Lechner <dlechner@...libre.com> wrote:
> Use a stack-allocated struct in sx9500_trigger_handler() to hold the
> IIO buffer scan data. Since the scan buffer isn't used outside of this
> function, it doesn't need to be in struct sx9500_data.
>
> By always allocating enough space for the maximum number of channels,
> we can avoid having to reallocate the buffer each time buffered reads
> are enabled.
>
> Signed-off-by: David Lechner <dlechner@...libre.com>
Applied.
If anyone is bored, switching the remainder of the unwinding done manually
in the remove() over to devm-based cleanup looks straightforward.
Jonathan
> ---
> Changes in v2:
> - Replaced `IIO_DECLARE_BUFFER_WITH_TS()` with struct.
> - I didn't pick up Andy's review tag since I don't consider this a trivial
> change and it deserves a 2nd look.
> - Link to v1: https://lore.kernel.org/r/20250711-iio-use-more-iio_declare_buffer_with_ts-4-v1-1-1a1e521cf747@baylibre.com
> ---
> drivers/iio/proximity/sx9500.c | 27 ++++++---------------------
> 1 file changed, 6 insertions(+), 21 deletions(-)
>
> diff --git a/drivers/iio/proximity/sx9500.c b/drivers/iio/proximity/sx9500.c
> index 05844f17a15f6980ab7d55536e5fecfc5e4fe8c0..6c67bae7488c4533ea513597f182af504a22c86d 100644
> --- a/drivers/iio/proximity/sx9500.c
> +++ b/drivers/iio/proximity/sx9500.c
> @@ -88,7 +88,6 @@ struct sx9500_data {
> bool prox_stat[SX9500_NUM_CHANNELS];
> bool event_enabled[SX9500_NUM_CHANNELS];
> bool trigger_enabled;
> - u16 *buffer;
> /* Remember enabled channels and sample rate during suspend. */
> unsigned int suspend_ctrl0;
> struct completion completion;
> @@ -578,22 +577,6 @@ static int sx9500_write_event_config(struct iio_dev *indio_dev,
> return ret;
> }
>
> -static int sx9500_update_scan_mode(struct iio_dev *indio_dev,
> - const unsigned long *scan_mask)
> -{
> - struct sx9500_data *data = iio_priv(indio_dev);
> -
> - mutex_lock(&data->mutex);
> - kfree(data->buffer);
> - data->buffer = kzalloc(indio_dev->scan_bytes, GFP_KERNEL);
> - mutex_unlock(&data->mutex);
> -
> - if (data->buffer == NULL)
> - return -ENOMEM;
> -
> - return 0;
> -}
> -
> static IIO_CONST_ATTR_SAMP_FREQ_AVAIL(
> "2.500000 3.333333 5 6.666666 8.333333 11.111111 16.666666 33.333333");
>
> @@ -612,7 +595,6 @@ static const struct iio_info sx9500_info = {
> .write_raw = &sx9500_write_raw,
> .read_event_config = &sx9500_read_event_config,
> .write_event_config = &sx9500_write_event_config,
> - .update_scan_mode = &sx9500_update_scan_mode,
> };
>
> static int sx9500_set_trigger_state(struct iio_trigger *trig,
> @@ -649,6 +631,10 @@ static irqreturn_t sx9500_trigger_handler(int irq, void *private)
> struct iio_dev *indio_dev = pf->indio_dev;
> struct sx9500_data *data = iio_priv(indio_dev);
> int val, bit, ret, i = 0;
> + struct {
> + u16 chan[SX9500_NUM_CHANNELS];
> + aligned_s64 timestamp;
> + } scan = { };
>
> mutex_lock(&data->mutex);
>
> @@ -658,10 +644,10 @@ static irqreturn_t sx9500_trigger_handler(int irq, void *private)
> if (ret < 0)
> goto out;
>
> - data->buffer[i++] = val;
> + scan.chan[i++] = val;
> }
>
> - iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
> + iio_push_to_buffers_with_timestamp(indio_dev, &scan,
> iio_get_time_ns(indio_dev));
>
> out:
> @@ -984,7 +970,6 @@ static void sx9500_remove(struct i2c_client *client)
> iio_triggered_buffer_cleanup(indio_dev);
> if (client->irq > 0)
> iio_trigger_unregister(data->trig);
> - kfree(data->buffer);
> }
>
> static int sx9500_suspend(struct device *dev)
>
> ---
> base-commit: cd2731444ee4e35db76f4fb587f12d327eec5446
> change-id: 20250711-iio-use-more-iio_declare_buffer_with_ts-4-66ddcde563fe
>
> Best regards,
Powered by blists - more mailing lists