Message-ID: <20190614152242.GC23020@phenom.ffwll.local>
Date: Fri, 14 Jun 2019 17:22:42 +0200
From: Daniel Vetter <daniel@...ll.ch>
To: Peter Zijlstra <peterz@...radead.org>
Cc: Christian König
<ckoenig.leichtzumerken@...il.com>, daniel@...ll.ch,
l.stach@...gutronix.de, linux+etnaviv@...linux.org.uk,
christian.gmeiner@...il.com, yuq825@...il.com, eric@...olt.net,
thellstrom@...are.com, dri-devel@...ts.freedesktop.org,
linux-kernel@...r.kernel.org, etnaviv@...ts.freedesktop.org,
lima@...ts.freedesktop.org
Subject: Re: [PATCH 3/6] drm/gem: use new ww_mutex_(un)lock_for_each macros
On Fri, Jun 14, 2019 at 03:19:16PM +0200, Peter Zijlstra wrote:
> On Fri, Jun 14, 2019 at 02:41:22PM +0200, Christian König wrote:
> > Use the provided macros instead of implementing deadlock handling on our own.
> >
> > Signed-off-by: Christian König <christian.koenig@....com>
> > ---
> > drivers/gpu/drm/drm_gem.c | 49 ++++++++++-----------------------------
> > 1 file changed, 12 insertions(+), 37 deletions(-)
> >
> > diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
> > index 50de138c89e0..6e4623d3bee2 100644
> > --- a/drivers/gpu/drm/drm_gem.c
> > +++ b/drivers/gpu/drm/drm_gem.c
> > @@ -1307,51 +1307,26 @@ int
> >  drm_gem_lock_reservations(struct drm_gem_object **objs, int count,
> >                            struct ww_acquire_ctx *acquire_ctx)
> >  {
> > -        int contended = -1;
> > +        struct ww_mutex *contended;
> >          int i, ret;
> >  
> >          ww_acquire_init(acquire_ctx, &reservation_ww_class);
> >  
> > -retry:
> > -        if (contended != -1) {
> > -                struct drm_gem_object *obj = objs[contended];
> > -
> > -                ret = ww_mutex_lock_slow_interruptible(&obj->resv->lock,
> > -                                                       acquire_ctx);
> > -                if (ret) {
> > -                        ww_acquire_done(acquire_ctx);
> > -                        return ret;
> > -                }
> > -        }
> > -
> > -        for (i = 0; i < count; i++) {
> > -                if (i == contended)
> > -                        continue;
> > -
> > -                ret = ww_mutex_lock_interruptible(&objs[i]->resv->lock,
> > -                                                  acquire_ctx);
> > -                if (ret) {
> > -                        int j;
> > -
> > -                        for (j = 0; j < i; j++)
> > -                                ww_mutex_unlock(&objs[j]->resv->lock);
> > -
> > -                        if (contended != -1 && contended >= i)
> > -                                ww_mutex_unlock(&objs[contended]->resv->lock);
> > -
> > -                        if (ret == -EDEADLK) {
> > -                                contended = i;
> > -                                goto retry;
> > -                        }
> > -
> > -                        ww_acquire_done(acquire_ctx);
> > -                        return ret;
> > -                }
> > -        }
>
> I note all the sites you use this on are simple idx iterators; so how
> about something like so:
>
> void ww_mutex_unlock_all(int count, void *data, struct ww_mutex *(*func)(int, void *))
> {
>         struct ww_mutex *lock;
>         int i;
> 
>         for (i = 0; i < count; i++) {
>                 lock = func(i, data);
>                 ww_mutex_unlock(lock);
>         }
> }
> 
> int ww_mutex_lock_all(int count, struct ww_acquire_ctx *acquire_ctx, bool intr,
>                       void *data, struct ww_mutex *(*func)(int, void *))
> {
>         int i, ret, contended = -1;
>         struct ww_mutex *lock;
> 
> retry:
>         if (contended != -1) {
>                 /* we backed off; take the contended lock first, slow path */
>                 lock = func(contended, data);
>                 if (intr) {
>                         ret = ww_mutex_lock_slow_interruptible(lock, acquire_ctx);
>                 } else {
>                         ww_mutex_lock_slow(lock, acquire_ctx);
>                         ret = 0;
>                 }
> 
>                 if (ret) {
>                         ww_acquire_done(acquire_ctx);
>                         return ret;
>                 }
>         }
> 
>         for (i = 0; i < count; i++) {
>                 if (i == contended)
>                         continue;
> 
>                 lock = func(i, data);
>                 if (intr)
>                         ret = ww_mutex_lock_interruptible(lock, acquire_ctx);
>                 else
>                         ret = ww_mutex_lock(lock, acquire_ctx);
> 
>                 if (ret) {
>                         ww_mutex_unlock_all(i, data, func);
>                         if (contended > i) {
>                                 lock = func(contended, data);
>                                 ww_mutex_unlock(lock);
>                         }
> 
>                         if (ret == -EDEADLK) {
>                                 contended = i;
>                                 goto retry;
>                         }
> 
>                         ww_acquire_done(acquire_ctx);
>                         return ret;
>                 }
>         }
> 
>         ww_acquire_done(acquire_ctx);
>         return 0;
> }
>
> > +        ww_mutex_lock_for_each(for (i = 0; i < count; i++),
> > +                               &objs[i]->resv->lock, contended, ret, true,
> > +                               acquire_ctx)
> > +                if (ret)
> > +                        goto error;
>
> which then becomes:
>
> struct ww_mutex *gem_ww_mutex_func(int i, void *data)
> {
>         struct drm_gem_object **objs = data;
>         return &objs[i]->resv->lock;
> }
>
> ret = ww_mutex_lock_all(count, acquire_ctx, true, objs, gem_ww_mutex_func);
>
> >          ww_acquire_done(acquire_ctx);
> >  
> >          return 0;
> > +
> > +error:
> > +        ww_mutex_unlock_for_each(for (i = 0; i < count; i++),
> > +                                 &objs[i]->resv->lock, contended);
> > +        ww_acquire_done(acquire_ctx);
> > +        return ret;
> >  }
> >  EXPORT_SYMBOL(drm_gem_lock_reservations);
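
For comparison, with those two helpers (and gem_ww_mutex_func() from above)
the gem side would presumably boil down to something like this, entirely
untested, just to show the shape:

int
drm_gem_lock_reservations(struct drm_gem_object **objs, int count,
                          struct ww_acquire_ctx *acquire_ctx)
{
        ww_acquire_init(acquire_ctx, &reservation_ww_class);

        return ww_mutex_lock_all(count, acquire_ctx, true, objs,
                                 gem_ww_mutex_func);
}

void
drm_gem_unlock_reservations(struct drm_gem_object **objs, int count,
                            struct ww_acquire_ctx *acquire_ctx)
{
        ww_mutex_unlock_all(count, objs, gem_ww_mutex_func);
        ww_acquire_fini(acquire_ctx);
}
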
Another idea, entirely untested (I guess making sure that we can use the
same iterator for both locking and unlocking in the contended case will be
fun), but maybe something like this:

        WW_MUTEX_LOCK_BEGIN();

        driver_for_each_loop (iter, pos) {
                WW_MUTEX_LOCK(&pos->ww_mutex);
        }

        WW_MUTEX_LOCK_END();

That way we can reuse any and all iterators that'll ever show up at least.
It's still horrible because the macros need to jump around between all of
them. It would also make this useful for more cases, where maybe you need
to trylock some lru lock to get at your next ww_mutex, or do some
kref_get_unless_zero. Buffer eviction loops tend to need exactly that kind
of thing, and it would all get ugly real fast if we had to stuff it into
some iterator argument.
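
Very roughly, and entirely untested, the plumbing could look something like
the skeleton below (all the __ww_* names are made up, reservation_ww_class
is hardcoded, and it assumes the caller's loop visits the same locks in the
same order on every pass):

/*
 * Hypothetical skeleton only: BEGIN opens a scope holding the retry state,
 * LOCK does the locking and, on failure, replays the caller's loop in
 * "unwind" mode to drop everything again, END handles the -EDEADLK slow
 * path and the retry.
 */
#define WW_MUTEX_LOCK_BEGIN(_ctx, _intr, _ret) \
{ \
        __label__ __ww_retry, __ww_unwound, __ww_done; \
        struct ww_acquire_ctx *__ww_ctx = (_ctx); \
        struct ww_mutex *__ww_contended = NULL, *__ww_failed = NULL; \
        const bool __ww_intr = (_intr); \
        bool __ww_unwind = false; \
        (_ret) = 0; \
        ww_acquire_init(__ww_ctx, &reservation_ww_class); \
__ww_retry:

#define WW_MUTEX_LOCK(_lock, _ret) \
        do { \
                struct ww_mutex *__ww_lock = (_lock); \
                if (__ww_unwind) { \
                        /* replaying the loop to drop what we hold */ \
                        if (__ww_lock == __ww_failed) \
                                goto __ww_unwound; \
                        ww_mutex_unlock(__ww_lock); \
                        break; \
                } \
                if (__ww_lock == __ww_contended) { \
                        /* already taken via the slow path */ \
                        __ww_contended = NULL; \
                        break; \
                } \
                (_ret) = __ww_intr ? \
                        ww_mutex_lock_interruptible(__ww_lock, __ww_ctx) : \
                        ww_mutex_lock(__ww_lock, __ww_ctx); \
                if ((_ret)) { \
                        __ww_failed = __ww_lock; \
                        __ww_unwind = true; \
                        goto __ww_retry; \
                } \
        } while (0)

#define WW_MUTEX_LOCK_END(_ret) \
        goto __ww_done; \
__ww_unwound: \
        /* a slow-path lock the failing pass never reached is still held */ \
        if (__ww_contended) \
                ww_mutex_unlock(__ww_contended); \
        if ((_ret) == -EDEADLK) { \
                __ww_contended = __ww_failed; \
                __ww_unwind = false; \
                if (!__ww_intr) { \
                        ww_mutex_lock_slow(__ww_contended, __ww_ctx); \
                        goto __ww_retry; \
                } \
                (_ret) = ww_mutex_lock_slow_interruptible(__ww_contended, \
                                                          __ww_ctx); \
                if (!(_ret)) \
                        goto __ww_retry; \
        } \
__ww_done: \
        ww_acquire_done(__ww_ctx); \
}

The __label__ declarations are only there so the construct could be used
more than once per function; the ugly part remains that LOCK has to replay
the caller's loop in unwind mode to drop everything again on contention.
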
This is kinda what we went with for modeset locks with
DRM_MODESET_LOCK_ALL_BEGIN/END; you can at least grab more locks in between
the pair. But those cover a much more limited set of use-cases, so maybe
the idea is too fragile for ww_mutex in full generality.
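
For reference, usage of the modeset pair is roughly like this (argument
list from memory, double-check drm_modeset_lock.h):

        struct drm_modeset_acquire_ctx ctx;
        int ret;

        DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE, ret);

        /* ... touch modeset state, grab further locks with &ctx as needed;
         * a -EDEADLK from those just has to end up in ret ... */

        DRM_MODESET_LOCK_ALL_END(ctx, ret);
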
Not going to try to work this out properly because too much w/e mode here
already, but I can give it a stab next week.
-Daniel
--
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch