Message-ID: <28078013-a643-af8e-22be-f36c75790ba5@linux.intel.com>
Date: Mon, 21 Oct 2024 12:32:45 +0300 (EEST)
From: Ilpo Järvinen <ilpo.jarvinen@...ux.intel.com>
To: Andy Shevchenko <andriy.shevchenko@...ux.intel.com>
cc: LKML <linux-kernel@...r.kernel.org>, platform-driver-x86@...r.kernel.org,
Andy Shevchenko <andy@...nel.org>,
Mika Westerberg <mika.westerberg@...ux.intel.com>,
Hans de Goede <hdegoede@...hat.com>, Ferry Toth <fntoth@...il.com>
Subject: Re: [PATCH v2 2/3] platform/x86: intel_scu_ipc: Simplify code with
cleanup helpers
On Mon, 21 Oct 2024, Andy Shevchenko wrote:
> Use macros defined in linux/cleanup.h to automate resource lifetime
> control in the driver. A byproduct of this change is that the negative
> conditionals are replaced with positive ones attached to the respective
> calls, so the code follows the usual pattern of checking for the error
> first.
>
> Tested-by: Ferry Toth <fntoth@...il.com>
> Signed-off-by: Andy Shevchenko <andriy.shevchenko@...ux.intel.com>
> ---
> drivers/platform/x86/intel_scu_ipc.c | 101 ++++++++++++---------------
> 1 file changed, 43 insertions(+), 58 deletions(-)
>
> diff --git a/drivers/platform/x86/intel_scu_ipc.c b/drivers/platform/x86/intel_scu_ipc.c
> index 290b38627542..3a1584ed7db8 100644
> --- a/drivers/platform/x86/intel_scu_ipc.c
> +++ b/drivers/platform/x86/intel_scu_ipc.c
> @@ -13,6 +13,7 @@
> * along with other APIs.
> */
>
> +#include <linux/cleanup.h>
> #include <linux/delay.h>
> #include <linux/device.h>
> #include <linux/errno.h>
> @@ -99,23 +100,21 @@ static struct class intel_scu_ipc_class = {
> */
> struct intel_scu_ipc_dev *intel_scu_ipc_dev_get(void)
> {
> - struct intel_scu_ipc_dev *scu = NULL;
> + guard(mutex)(&ipclock);
>
> - mutex_lock(&ipclock);
> if (ipcdev) {
> get_device(&ipcdev->dev);
> /*
> * Prevent the IPC provider from being unloaded while it
> * is being used.
> */
> - if (!try_module_get(ipcdev->owner))
> - put_device(&ipcdev->dev);
> - else
> - scu = ipcdev;
> + if (try_module_get(ipcdev->owner))
> + return ipcdev;
> +
> + put_device(&ipcdev->dev);
> }
>
> - mutex_unlock(&ipclock);
> - return scu;
> + return NULL;
> }
> EXPORT_SYMBOL_GPL(intel_scu_ipc_dev_get);
>
> @@ -289,12 +288,11 @@ static int pwr_reg_rdwr(struct intel_scu_ipc_dev *scu, u16 *addr, u8 *data,
>
> memset(cbuf, 0, sizeof(cbuf));
>
> - mutex_lock(&ipclock);
> + guard(mutex)(&ipclock);
> +
> scu = intel_scu_ipc_get(scu);
> - if (IS_ERR(scu)) {
> - mutex_unlock(&ipclock);
> + if (IS_ERR(scu))
> return PTR_ERR(scu);
> - }
>
> for (nc = 0; nc < count; nc++, offset += 2) {
> cbuf[offset] = addr[nc];
> @@ -319,13 +317,14 @@ static int pwr_reg_rdwr(struct intel_scu_ipc_dev *scu, u16 *addr, u8 *data,
> }
>
> err = intel_scu_ipc_check_status(scu);
> - if (!err) { /* Read rbuf */
> - for (nc = 0, offset = 0; nc < 4; nc++, offset += 4)
> - wbuf[nc] = ipc_data_readl(scu, offset);
> - memcpy(data, wbuf, count);
> - }
> - mutex_unlock(&ipclock);
> - return err;
> + if (err)
> + return err;
> +
> + for (nc = 0, offset = 0; nc < 4; nc++, offset += 4)
> + wbuf[nc] = ipc_data_readl(scu, offset);
> + memcpy(data, wbuf, count);
> +
> + return 0;
> }
>
> /**
> @@ -446,17 +445,15 @@ int intel_scu_ipc_dev_simple_command(struct intel_scu_ipc_dev *scu, int cmd,
> u32 cmdval;
> int err;
>
> - mutex_lock(&ipclock);
> + guard(mutex)(&ipclock);
> +
> scu = intel_scu_ipc_get(scu);
> - if (IS_ERR(scu)) {
> - mutex_unlock(&ipclock);
> + if (IS_ERR(scu))
> return PTR_ERR(scu);
> - }
>
> cmdval = sub << 12 | cmd;
> ipc_command(scu, cmdval);
> err = intel_scu_ipc_check_status(scu);
> - mutex_unlock(&ipclock);
> if (err)
> dev_err(&scu->dev, "IPC command %#x failed with %d\n", cmdval, err);
> return err;
> @@ -485,18 +482,17 @@ int intel_scu_ipc_dev_command_with_size(struct intel_scu_ipc_dev *scu, int cmd,
> {
> size_t outbuflen = DIV_ROUND_UP(outlen, sizeof(u32));
> size_t inbuflen = DIV_ROUND_UP(inlen, sizeof(u32));
> - u32 cmdval, inbuf[4] = {};
> + u32 cmdval, inbuf[4] = {}, outbuf[4] = {};
> int i, err;
>
> if (inbuflen > 4 || outbuflen > 4)
> return -EINVAL;
>
> - mutex_lock(&ipclock);
> + guard(mutex)(&ipclock);
> +
> scu = intel_scu_ipc_get(scu);
> - if (IS_ERR(scu)) {
> - mutex_unlock(&ipclock);
> + if (IS_ERR(scu))
> return PTR_ERR(scu);
> - }
>
> memcpy(inbuf, in, inlen);
> for (i = 0; i < inbuflen; i++)
> @@ -505,20 +501,17 @@ int intel_scu_ipc_dev_command_with_size(struct intel_scu_ipc_dev *scu, int cmd,
> cmdval = (size << 16) | (sub << 12) | cmd;
> ipc_command(scu, cmdval);
> err = intel_scu_ipc_check_status(scu);
> -
> - if (!err) {
> - u32 outbuf[4] = {};
> -
> - for (i = 0; i < outbuflen; i++)
> - outbuf[i] = ipc_data_readl(scu, 4 * i);
> -
> - memcpy(out, outbuf, outlen);
> + if (err) {
> + dev_err(&scu->dev, "IPC command %#x failed with %d\n", cmdval, err);
> + return err;
> }
>
> - mutex_unlock(&ipclock);
> - if (err)
> - dev_err(&scu->dev, "IPC command %#x failed with %d\n", cmdval, err);
> - return err;
> + for (i = 0; i < outbuflen; i++)
> + outbuf[i] = ipc_data_readl(scu, 4 * i);
> +
> + memcpy(out, outbuf, outlen);
> +
> + return 0;
> }
> EXPORT_SYMBOL(intel_scu_ipc_dev_command_with_size);
>
> @@ -572,18 +565,15 @@ __intel_scu_ipc_register(struct device *parent,
> struct intel_scu_ipc_dev *scu;
> void __iomem *ipc_base;
>
> - mutex_lock(&ipclock);
> + guard(mutex)(&ipclock);
> +
> /* We support only one IPC */
> - if (ipcdev) {
> - err = -EBUSY;
> - goto err_unlock;
> - }
> + if (ipcdev)
> + return ERR_PTR(-EBUSY);
>
> scu = kzalloc(sizeof(*scu), GFP_KERNEL);
> - if (!scu) {
> - err = -ENOMEM;
> - goto err_unlock;
> - }
> + if (!scu)
> + return ERR_PTR(-ENOMEM);
>
> scu->owner = owner;
> scu->dev.parent = parent;
> @@ -621,13 +611,11 @@ __intel_scu_ipc_register(struct device *parent,
> err = device_register(&scu->dev);
> if (err) {
> put_device(&scu->dev);
> - goto err_unlock;
> + return ERR_PTR(err);
> }
>
> /* Assign device at last */
> ipcdev = scu;
> - mutex_unlock(&ipclock);
> -
> return scu;
>
> err_unmap:
> @@ -636,9 +624,6 @@ __intel_scu_ipc_register(struct device *parent,
> release_mem_region(scu_data->mem.start, resource_size(&scu_data->mem));
> err_free:
> kfree(scu);
> -err_unlock:
> - mutex_unlock(&ipclock);
> -
> return ERR_PTR(err);
> }
> EXPORT_SYMBOL_GPL(__intel_scu_ipc_register);
> @@ -652,12 +637,12 @@ EXPORT_SYMBOL_GPL(__intel_scu_ipc_register);
> */
> void intel_scu_ipc_unregister(struct intel_scu_ipc_dev *scu)
> {
> - mutex_lock(&ipclock);
> + guard(mutex)(&ipclock);
> +
> if (!WARN_ON(!ipcdev)) {
> ipcdev = NULL;
> device_unregister(&scu->dev);
> }
> - mutex_unlock(&ipclock);
> }
> EXPORT_SYMBOL_GPL(intel_scu_ipc_unregister);
IMO, this change is doing too many things at once and it's hard to justify
why those changes must be kept in the same patch. If the guard() conversion
were done first and the condition reversals only after that, both patches
would probably be nearly trivial to review for correctness.
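
To illustrate (a rough sketch only, based on the code as it is before this
patch, not a requirement on the exact form): the first patch could convert
just the locking and keep the existing conditionals untouched, e.g. for
intel_scu_ipc_dev_get():

struct intel_scu_ipc_dev *intel_scu_ipc_dev_get(void)
{
	struct intel_scu_ipc_dev *scu = NULL;

	/* Replaces the mutex_lock()/mutex_unlock() pair; unlocks on scope exit */
	guard(mutex)(&ipclock);

	if (ipcdev) {
		get_device(&ipcdev->dev);
		/*
		 * Prevent the IPC provider from being unloaded while it
		 * is being used.
		 */
		if (!try_module_get(ipcdev->owner))
			put_device(&ipcdev->dev);
		else
			scu = ipcdev;
	}

	return scu;
}

The second patch would then only flip the conditionals and add the early
returns on top of that, which is easy to verify because the guard already
takes care of the unlock on every return path.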
--
i.