Date:   Fri, 30 Apr 2021 19:09:00 +0100
From:   Jonathan Cameron <Jonathan.Cameron@...wei.com>
To:     Mauro Carvalho Chehab <mchehab+huawei@...nel.org>
CC:     <linuxarm@...wei.com>, <mauro.chehab@...wei.com>,
        Ezequiel Garcia <ezequiel@...labora.com>,
        Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
        Mauro Carvalho Chehab <mchehab@...nel.org>,
        Philipp Zabel <p.zabel@...gutronix.de>,
        <devel@...verdev.osuosl.org>, <linux-kernel@...r.kernel.org>,
        <linux-media@...r.kernel.org>, <linux-rockchip@...ts.infradead.org>
Subject: Re: [PATCH v4 78/79] media: hantro: use pm_runtime_resume_and_get()

On Wed, 28 Apr 2021 16:52:39 +0200
Mauro Carvalho Chehab <mchehab+huawei@...nel.org> wrote:

> Commit dd8088d5a896 ("PM: runtime: Add pm_runtime_resume_and_get to deal with usage counter")
> added pm_runtime_resume_and_get() in order to automatically handle
> dev->power.usage_count decrement on errors.
> 
> While there's nothing wrong with the current usage in this driver,
> as we're getting rid of pm_runtime_get_sync() calls all over
> the media subsystem, let's remove the last occurrence here as well.

I'm not sure there was nothing wrong here before your patch...
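
For anyone following along, the difference between the two helpers boils
down to roughly this (illustration only, not code from this patch; the
function names below are made up, and both variants want
<linux/pm_runtime.h>):

	/* Old helper: the usage count stays elevated even when the
	 * resume fails, so every error path has to drop it explicitly. */
	static int resume_old_style(struct device *dev)
	{
		int ret = pm_runtime_get_sync(dev);

		if (ret < 0) {
			pm_runtime_put_noidle(dev);
			return ret;
		}
		return 0;
	}

	/* New helper: drops the usage count itself when the resume
	 * fails, so callers only put after a successful get. */
	static int resume_new_style(struct device *dev)
	{
		return pm_runtime_resume_and_get(dev);
	}

An error path that forgets the explicit put with the old helper leaks a
usage count reference, which is exactly the class of bug the new helper
is meant to close off.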


> 
> Signed-off-by: Mauro Carvalho Chehab <mchehab+huawei@...nel.org>
> ---
>  drivers/staging/media/hantro/hantro_drv.c | 23 ++++++++++++++++-------
>  1 file changed, 16 insertions(+), 7 deletions(-)
> 
> diff --git a/drivers/staging/media/hantro/hantro_drv.c b/drivers/staging/media/hantro/hantro_drv.c
> index 595e82a82728..25fa36e7e773 100644
> --- a/drivers/staging/media/hantro/hantro_drv.c
> +++ b/drivers/staging/media/hantro/hantro_drv.c
> @@ -56,14 +56,12 @@ dma_addr_t hantro_get_ref(struct hantro_ctx *ctx, u64 ts)
>  	return hantro_get_dec_buf_addr(ctx, buf);
>  }
>  
> -static void hantro_job_finish(struct hantro_dev *vpu,
> -			      struct hantro_ctx *ctx,
> -			      enum vb2_buffer_state result)
> +static void hantro_job_finish_no_pm(struct hantro_dev *vpu,
> +				    struct hantro_ctx *ctx,
> +				    enum vb2_buffer_state result)
>  {
>  	struct vb2_v4l2_buffer *src, *dst;
>  
> -	pm_runtime_mark_last_busy(vpu->dev);
> -	pm_runtime_put_autosuspend(vpu->dev);
>  	clk_bulk_disable(vpu->variant->num_clocks, vpu->clocks);
>  
>  	src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
> @@ -81,6 +79,16 @@ static void hantro_job_finish(struct hantro_dev *vpu,
>  					 result);
>  }
>  
> +static void hantro_job_finish(struct hantro_dev *vpu,
> +			      struct hantro_ctx *ctx,
> +			      enum vb2_buffer_state result)
> +{
> +	pm_runtime_mark_last_busy(vpu->dev);
> +	pm_runtime_put_autosuspend(vpu->dev);
> +
> +	hantro_job_finish_no_pm(vpu, ctx, result);
> +}
> +
>  void hantro_irq_done(struct hantro_dev *vpu,
>  		     enum vb2_buffer_state result)
>  {
> @@ -155,7 +163,8 @@ static void device_run(void *priv)
>  	ret = clk_bulk_enable(ctx->dev->variant->num_clocks, ctx->dev->clocks);
>  	if (ret)
>  		goto err_cancel_job;

Before your patch, if this error condition was hit, we would call
pm_runtime_put_autosuspend() without the matching pm_runtime_get_sync()
ever having run...  You fixed that, but the patch description doesn't
call it out.
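
To spell it out, paraphrasing the pre-patch device_run() error handling
rather than adding anything new:

	ret = clk_bulk_enable(ctx->dev->variant->num_clocks, ctx->dev->clocks);
	if (ret)
		goto err_cancel_job;	/* no runtime PM reference taken yet */

	ret = pm_runtime_get_sync(ctx->dev->dev);
	if (ret < 0)
		goto err_cancel_job;	/* count elevated, but resume failed */
	...

err_cancel_job:
	/* the old hantro_job_finish() unconditionally did
	 * pm_runtime_mark_last_busy() + pm_runtime_put_autosuspend() here */
	hantro_job_finish(ctx->dev, ctx, VB2_BUF_STATE_ERROR);

So on the clock error path the driver dropped a runtime PM reference it
never took; routing the error paths through hantro_job_finish_no_pm()
closes that.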


> -	ret = pm_runtime_get_sync(ctx->dev->dev);
> +
> +	ret = pm_runtime_resume_and_get(ctx->dev->dev);
>  	if (ret < 0)
>  		goto err_cancel_job;
>  
> @@ -165,7 +174,7 @@ static void device_run(void *priv)
>  	return;
>  
>  err_cancel_job:
> -	hantro_job_finish(ctx->dev, ctx, VB2_BUF_STATE_ERROR);
> +	hantro_job_finish_no_pm(ctx->dev, ctx, VB2_BUF_STATE_ERROR);
>  }
>  
>  static struct v4l2_m2m_ops vpu_m2m_ops = {
