Message-ID: <CAKGbVbu-mdY92zuR6204SVktS_e98XJM1b=gWodO_TySUP2K2Q@mail.gmail.com>
Date:   Wed, 3 Apr 2019 08:55:03 +0800
From:   Qiang Yu <yuq825@...il.com>
To:     Eric Anholt <eric@...olt.net>
Cc:     dri-devel <dri-devel@...ts.freedesktop.org>,
        Linux Kernel Mailing List <linux-kernel@...r.kernel.org>,
        david.emett@...adcom.com, thomas.spurden@...adcom.com,
        Rob Herring <robh@...nel.org>
Subject: Re: [PATCH 7/7] drm/lima: Use the drm_gem_fence_array_add helpers for
 our deps.

Indeed not that important, so patches 5 & 7 are:
Reviewed-and-tested-by: Qiang Yu <yuq825@...il.com>

Regards,
Qiang

On Wed, Apr 3, 2019 at 12:57 AM Eric Anholt <eric@...olt.net> wrote:
>
> Qiang Yu <yuq825@...il.com> writes:
>
> > On Tue, Apr 2, 2019 at 6:26 AM Eric Anholt <eric@...olt.net> wrote:
> >>
> >> I haven't tested this, but it's a pretty direct port of what I did for
> >> v3d.
> >>
> >> Signed-off-by: Eric Anholt <eric@...olt.net>
> >> ---
> >>  drivers/gpu/drm/lima/lima_gem.c   | 37 +----------------
> >>  drivers/gpu/drm/lima/lima_sched.c | 66 ++++++-------------------------
> >>  drivers/gpu/drm/lima/lima_sched.h |  6 +--
> >>  3 files changed, 16 insertions(+), 93 deletions(-)
> >>
> >> diff --git a/drivers/gpu/drm/lima/lima_gem.c b/drivers/gpu/drm/lima/lima_gem.c
> >> index 2d3cf96f6c58..8f80286c80b4 100644
> >> --- a/drivers/gpu/drm/lima/lima_gem.c
> >> +++ b/drivers/gpu/drm/lima/lima_gem.c
> >> @@ -144,40 +144,7 @@ static int lima_gem_sync_bo(struct lima_sched_task *task, struct lima_bo *bo,
> >>         if (explicit)
> >>                 return 0;
> >>
> >> -       /* implicit sync use bo fence in resv obj */
> >> -       if (write) {
> >> -               unsigned nr_fences;
> >> -               struct dma_fence **fences;
> >> -               int i;
> >> -
> >> -               err = reservation_object_get_fences_rcu(
> >> -                       bo->gem.resv, NULL, &nr_fences, &fences);
> >> -               if (err || !nr_fences)
> >> -                       return err;
> >> -
> >> -               for (i = 0; i < nr_fences; i++) {
> >> -                       err = lima_sched_task_add_dep(task, fences[i]);
> >> -                       if (err)
> >> -                               break;
> >> -               }
> >> -
> >> -               /* for error case free remaining fences */
> >> -               for ( ; i < nr_fences; i++)
> >> -                       dma_fence_put(fences[i]);
> >> -
> >> -               kfree(fences);
> >> -       } else {
> >> -               struct dma_fence *fence;
> >> -
> >> -               fence = reservation_object_get_excl_rcu(bo->gem.resv);
> >> -               if (fence) {
> >> -                       err = lima_sched_task_add_dep(task, fence);
> >> -                       if (err)
> >> -                               dma_fence_put(fence);
> >> -               }
> >> -       }
> >> -
> >> -       return err;
> >> +       return drm_gem_fence_array_add_implicit(&task->deps, &bo->gem, write);
> >>  }
> >>
> >>  static int lima_gem_lock_bos(struct lima_bo **bos, u32 nr_bos,
> >> @@ -250,7 +217,7 @@ static int lima_gem_add_deps(struct drm_file *file, struct lima_submit *submit)
> >>                 if (err)
> >>                         return err;
> >>
> >> -               err = lima_sched_task_add_dep(submit->task, fence);
> >> +               err = drm_gem_fence_array_add(&submit->task->deps, fence);
> >>                 if (err) {
> >>                         dma_fence_put(fence);
> >>                         return err;
> >> diff --git a/drivers/gpu/drm/lima/lima_sched.c b/drivers/gpu/drm/lima/lima_sched.c
> >> index 97bd9c1deb87..e253d031fb3d 100644
> >> --- a/drivers/gpu/drm/lima/lima_sched.c
> >> +++ b/drivers/gpu/drm/lima/lima_sched.c
> >> @@ -3,6 +3,7 @@
> >>
> >>  #include <linux/kthread.h>
> >>  #include <linux/slab.h>
> >> +#include <linux/xarray.h>
> >>
> >>  #include "lima_drv.h"
> >>  #include "lima_sched.h"
> >> @@ -126,19 +127,24 @@ int lima_sched_task_init(struct lima_sched_task *task,
> >>
> >>         task->num_bos = num_bos;
> >>         task->vm = lima_vm_get(vm);
> >> +
> >> +       xa_init_flags(&task->deps, XA_FLAGS_ALLOC);
> >> +
> >>         return 0;
> >>  }
> >>
> >>  void lima_sched_task_fini(struct lima_sched_task *task)
> >>  {
> >> +       struct dma_fence *fence;
> >> +       unsigned long index;
> >>         int i;
> >>
> >>         drm_sched_job_cleanup(&task->base);
> >>
> >> -       for (i = 0; i < task->num_dep; i++)
> >> -               dma_fence_put(task->dep[i]);
> >> -
> >> -       kfree(task->dep);
> >> +       xa_for_each(&task->deps, index, fence) {
> >> +               dma_fence_put(fence);
> >> +       }
> >> +       xa_destroy(&task->deps);
> >>
> >>         if (task->bos) {
> >>                 for (i = 0; i < task->num_bos; i++)
> >> @@ -149,42 +155,6 @@ void lima_sched_task_fini(struct lima_sched_task *task)
> >>         lima_vm_put(task->vm);
> >>  }
> >>
> >> -int lima_sched_task_add_dep(struct lima_sched_task *task, struct dma_fence *fence)
> >> -{
> >> -       int i, new_dep = 4;
> >> -
> >> -       /* same context's fence is definitly earlier then this task */
> >> -       if (fence->context == task->base.s_fence->finished.context) {
> >> -               dma_fence_put(fence);
> >> -               return 0;
> >> -       }
> >
> > It seems you dropped this check in drm_gem_fence_array_add; there is no bug
> > without it, but a redundant fence will be added to the deps array. Maybe we
> > can add a context parameter to drm_gem_fence_array_add and
> > drm_gem_fence_array_add_implicit to filter out fences from the same
> > drm_sched_entity.
>
> Does this feel important to you?  To me, one extra slot in the array and
> a trip through the top of drm_sched_entity_add_dependency_cb() doesn't
> feel like it's worth making the API clumsier.
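
As a concrete illustration of the filtering Qiang proposes above, here is a
minimal sketch in kernel C, assuming the check stays on the lima side rather
than growing the drm_gem_fence_array_add() API; lima_fence_array_add_filtered()
and its sched_context parameter are hypothetical names, not part of this patch
or of the DRM core:

#include <linux/dma-fence.h>
#include <linux/xarray.h>
#include <drm/drm_gem.h>

/*
 * Hypothetical helper (assumed, not in the patch above): skip fences that
 * come from the task's own scheduler context, mirroring the check done by
 * the removed lima_sched_task_add_dep(), then defer to the DRM helper.
 */
static int lima_fence_array_add_filtered(struct xarray *deps,
					 struct dma_fence *fence,
					 u64 sched_context)
{
	/* A fence from the same context is ordered before this task anyway,
	 * so it is not a real dependency; drop the reference and skip it. */
	if (fence->context == sched_context) {
		dma_fence_put(fence);
		return 0;
	}

	/* Otherwise let drm_gem_fence_array_add() track the fence in the
	 * task's deps xarray for the scheduler to wait on. */
	return drm_gem_fence_array_add(deps, fence);
}

A caller in lima_gem_add_deps() would pass
submit->task->base.s_fence->finished.context, the same value the removed check
compared against. Eric's reply above argues the saving is marginal: an
unfiltered fence costs one extra slot in the array and an early-out in
drm_sched_entity_add_dependency_cb().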
