[<prev] [next>] [<thread-prev] [day] [month] [year] [list]
Message-ID: <CAOw6vbKygkfN_2aKjcp--pyJBCmNE0efT7bVNWh7LmirK7dO9A@mail.gmail.com>
Date: Mon, 12 Sep 2016 17:06:41 -0400
From: Sean Paul <seanpaul@...gle.com>
To: Gustavo Padovan <gustavo@...ovan.org>
Cc: dri-devel <dri-devel@...ts.freedesktop.org>,
Linux Kernel Mailing List <linux-kernel@...r.kernel.org>,
Daniel Stone <daniels@...labora.com>,
Daniel Vetter <daniel.vetter@...ll.ch>,
Rob Clark <robdclark@...il.com>,
Greg Hackmann <ghackmann@...gle.com>,
John Harrison <John.C.Harrison@...el.com>,
Laurent Pinchart <laurent.pinchart@...asonboard.com>,
Stéphane Marchesin <marcheu@...gle.com>,
m.chehab@...sung.com, Sumit Semwal <sumit.semwal@...aro.org>,
Maarten Lankhorst <maarten.lankhorst@...ux.intel.com>,
Gustavo Padovan <gustavo.padovan@...labora.co.uk>
Subject: Re: [PATCH v4] drm/fence: allow fence waiting to be interrupted by userspace
On Mon, Sep 12, 2016 at 3:08 PM, Gustavo Padovan <gustavo@...ovan.org> wrote:
> From: Gustavo Padovan <gustavo.padovan@...labora.co.uk>
>
> If userspace is running a synchronous atomic commit and interrupts the
> atomic operation during fence_wait() it will hang until the timer expires,
> so here we change the wait to be interruptible so it stops immediately when
> userspace wants to quit.
>
> Also adds the necessary error checking for fence_wait().
>
> v2: Comment by Daniel Vetter
> - Add error checking for fence_wait()
>
> v3: Rebase on top of new atomic noblocking support
>
> v4: Comment by Maarten Lankhorst
> - remove 'swapped' bitfield as it was duplicating information
>
> v5: Comments by Maarten Lankhorst
> - assign plane->state to plane_state if !intr
> - squash previous patch into this one
>
> v6: Comment by Sean Paul
> - rename intr to pre_swap
>
> Signed-off-by: Gustavo Padovan <gustavo.padovan@...labora.co.uk>
> Reviewed-by: Maarten Lankhorst <maarten.lankhorst@...ux.intel.com>
Applied to drm-misc with a couple checkpatch fixes. I also moved the
comment to directly above fence_wait (IRC acked).
Sean
> ---
> drivers/gpu/drm/drm_atomic_helper.c | 41 +++++++++++++++++++++++++++++--------
> drivers/gpu/drm/msm/msm_atomic.c | 2 +-
> include/drm/drm_atomic_helper.h | 5 +++--
> 3 files changed, 36 insertions(+), 12 deletions(-)
>
> diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
> index 6fdd7ba..c34d002 100644
> --- a/drivers/gpu/drm/drm_atomic_helper.c
> +++ b/drivers/gpu/drm/drm_atomic_helper.c
> @@ -1009,29 +1009,46 @@ EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_enables);
> * drm_atomic_helper_wait_for_fences - wait for fences stashed in plane state
> * @dev: DRM device
> * @state: atomic state object with old state structures
> + * @pre_swap: if true, do an interruptible wait
> *
> * For implicit sync, driver should fish the exclusive fence out from the
> * incoming fb's and stash it in the drm_plane_state. This is called after
> * drm_atomic_helper_swap_state() so it uses the current plane state (and
> * just uses the atomic state to find the changed planes)
> + *
> + * Returns zero on success or < 0 if fence_wait() fails.
> */
> -void drm_atomic_helper_wait_for_fences(struct drm_device *dev,
> - struct drm_atomic_state *state)
> +int drm_atomic_helper_wait_for_fences(struct drm_device *dev,
> + struct drm_atomic_state *state,
> + bool pre_swap)
> {
> struct drm_plane *plane;
> struct drm_plane_state *plane_state;
> - int i;
> + int i, ret;
>
> for_each_plane_in_state(state, plane, plane_state, i) {
> - if (!plane->state->fence)
> + /*
> + * If waiting for fences pre-swap (ie: nonblock), userspace can
> + * still interrupt the operation. Instead of blocking until the
> + * timer expires, make the wait interruptible.
> + */
> + if (!pre_swap)
> + plane_state = plane->state;
> +
> + if (!plane_state->fence)
> continue;
>
> - WARN_ON(!plane->state->fb);
> + WARN_ON(!plane_state->fb);
> +
> + ret = fence_wait(plane_state->fence, pre_swap);
> + if (ret)
> + return ret;
>
> - fence_wait(plane->state->fence, false);
> - fence_put(plane->state->fence);
> - plane->state->fence = NULL;
> + fence_put(plane_state->fence);
> + plane_state->fence = NULL;
> }
> +
> + return 0;
> }
> EXPORT_SYMBOL(drm_atomic_helper_wait_for_fences);
>
> @@ -1179,7 +1196,7 @@ static void commit_tail(struct drm_atomic_state *state)
>
> funcs = dev->mode_config.helper_private;
>
> - drm_atomic_helper_wait_for_fences(dev, state);
> + drm_atomic_helper_wait_for_fences(dev, state, false);
>
> drm_atomic_helper_wait_for_dependencies(state);
>
> @@ -1238,6 +1255,12 @@ int drm_atomic_helper_commit(struct drm_device *dev,
> if (ret)
> return ret;
>
> + if (!nonblock) {
> + ret = drm_atomic_helper_wait_for_fences(dev, state, true);
> + if (ret)
> + return ret;
> + }
> +
> /*
> * This is the point of no return - everything below never fails except
> * when the hw goes bonghits. Which means we can commit the new state on
> diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
> index 5df252c..73bae38 100644
> --- a/drivers/gpu/drm/msm/msm_atomic.c
> +++ b/drivers/gpu/drm/msm/msm_atomic.c
> @@ -112,7 +112,7 @@ static void complete_commit(struct msm_commit *c, bool async)
> struct msm_drm_private *priv = dev->dev_private;
> struct msm_kms *kms = priv->kms;
>
> - drm_atomic_helper_wait_for_fences(dev, state);
> + drm_atomic_helper_wait_for_fences(dev, state, false);
>
> kms->funcs->prepare_commit(kms, state);
>
> diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h
> index f866828..7ff92b0 100644
> --- a/include/drm/drm_atomic_helper.h
> +++ b/include/drm/drm_atomic_helper.h
> @@ -45,8 +45,9 @@ int drm_atomic_helper_commit(struct drm_device *dev,
> struct drm_atomic_state *state,
> bool nonblock);
>
> -void drm_atomic_helper_wait_for_fences(struct drm_device *dev,
> - struct drm_atomic_state *state);
> +int drm_atomic_helper_wait_for_fences(struct drm_device *dev,
> + struct drm_atomic_state *state,
> + bool pre_swap);
> bool drm_atomic_helper_framebuffer_changed(struct drm_device *dev,
> struct drm_atomic_state *old_state,
> struct drm_crtc *crtc);
> --
> 2.5.5
>
Powered by blists - more mailing lists