[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20160630142236.GB28577@nuc-i3427.alporthouse.com>
Date: Thu, 30 Jun 2016 15:22:36 +0100
From: Chris Wilson <chris@...is-wilson.co.uk>
To: Gustavo Padovan <gustavo@...ovan.org>
Cc: dri-devel@...ts.freedesktop.org, linux-kernel@...r.kernel.org,
Daniel Stone <daniels@...labora.com>,
Daniel Vetter <daniel.vetter@...ll.ch>,
Rob Clark <robdclark@...il.com>,
Greg Hackmann <ghackmann@...gle.com>,
John Harrison <John.C.Harrison@...el.com>,
laurent.pinchart@...asonboard.com, seanpaul@...gle.com,
marcheu@...gle.com, m.chehab@...sung.com,
Sumit Semwal <sumit.semwal@...aro.org>,
Maarten Lankhorst <maarten.lankhorst@...ux.intel.com>,
Gustavo Padovan <gustavo.padovan@...labora.co.uk>,
Christian König <christian.koenig@....com>
Subject: Re: [PATCH 2/2] dma-buf/sync_file: rework fence storage in struct
file
On Thu, Jun 30, 2016 at 11:09:48AM -0300, Gustavo Padovan wrote:
> From: Gustavo Padovan <gustavo.padovan@...labora.co.uk>
>
> Create sync_file->fence to abstract the type of fence we are using for
> each sync_file. If only one fence is present we use a normal struct fence
> but if there is more fences to be added to the sync_file a fence_array
> is created.
>
> This change cleans up sync_file a bit. We don't need to have sync_file_cb
> array anymore. Instead, as we always have one fence, only one fence
> callback is registered per sync_file.
>
> v3: Comments from Chris Wilson and Christian König
> - struct sync_file lost status member in favor of fence_is_signaled()
> - drop use of fence_array_teardown()
> - use sizeof(*fence) to allocate only an array of fence pointers
>
> v2: Comments from Chris Wilson and Christian König
> - Not using fence_ops anymore
> - fence_is_array() was created to differentiate fence from fence_array
> - fence_array_teardown() is now exported and used under fence_is_array()
> - struct sync_file lost num_fences member
>
> Cc: Chris Wilson <chris@...is-wilson.co.uk>
> Cc: Christian König <christian.koenig@....com>
> Signed-off-by: Gustavo Padovan <gustavo.padovan@...labora.co.uk>
> ---
> drivers/dma-buf/sync_file.c | 163 ++++++++++++++++++++++-------------
> drivers/staging/android/sync_debug.c | 13 ++-
> include/linux/sync_file.h | 17 ++--
> 3 files changed, 118 insertions(+), 75 deletions(-)
>
> diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c
> index 9aaa608..7a5aaae 100644
> --- a/drivers/dma-buf/sync_file.c
> +++ b/drivers/dma-buf/sync_file.c
> @@ -28,11 +28,11 @@
>
> static const struct file_operations sync_file_fops;
>
> -static struct sync_file *sync_file_alloc(int size)
> +static struct sync_file *sync_file_alloc(void)
> {
> struct sync_file *sync_file;
>
> - sync_file = kzalloc(size, GFP_KERNEL);
> + sync_file = kzalloc(sizeof(*sync_file), GFP_KERNEL);
> if (!sync_file)
> return NULL;
>
> @@ -45,6 +45,8 @@ static struct sync_file *sync_file_alloc(int size)
>
> init_waitqueue_head(&sync_file->wq);
>
> + INIT_LIST_HEAD(&sync_file->cb.node);
> +
> return sync_file;
>
> err:
> @@ -54,14 +56,11 @@ err:
>
> static void fence_check_cb_func(struct fence *f, struct fence_cb *cb)
> {
> - struct sync_file_cb *check;
> struct sync_file *sync_file;
>
> - check = container_of(cb, struct sync_file_cb, cb);
> - sync_file = check->sync_file;
> + sync_file = container_of(cb, struct sync_file, cb);
>
> - if (atomic_dec_and_test(&sync_file->status))
> - wake_up_all(&sync_file->wq);
> + wake_up_all(&sync_file->wq);
> }
>
> /**
> @@ -76,22 +75,18 @@ struct sync_file *sync_file_create(struct fence *fence)
> {
> struct sync_file *sync_file;
>
> - sync_file = sync_file_alloc(offsetof(struct sync_file, cbs[1]));
> + sync_file = sync_file_alloc();
> if (!sync_file)
> return NULL;
>
> - sync_file->num_fences = 1;
> - atomic_set(&sync_file->status, 1);
> + sync_file->fence = fence;
> +
> snprintf(sync_file->name, sizeof(sync_file->name), "%s-%s%llu-%d",
> fence->ops->get_driver_name(fence),
> fence->ops->get_timeline_name(fence), fence->context,
> fence->seqno);
>
> - sync_file->cbs[0].fence = fence;
> - sync_file->cbs[0].sync_file = sync_file;
> - if (fence_add_callback(fence, &sync_file->cbs[0].cb,
> - fence_check_cb_func))
> - atomic_dec(&sync_file->status);
> + fence_add_callback(fence, &sync_file->cb, fence_check_cb_func);
>
> return sync_file;
> }
> @@ -121,14 +116,48 @@ err:
> return NULL;
> }
>
> -static void sync_file_add_pt(struct sync_file *sync_file, int *i,
> - struct fence *fence)
> +static int sync_file_set_fence(struct sync_file *sync_file,
> + struct fence **fences, int num_fences)
> +{
> + struct fence_array *array;
> +
> + /*
> + * The reference for the fences in the new sync_file and holded
s/holded/held/
> + * in add_fence() during the merge procedure, so for num_fences == 1
> + * we already own a new reference to the fence. For num_fences > 1
> + * we own the reference of the fence_array creation.
> + */
Thanks, that really does help understanding the reference handling.
> + if (num_fences == 1) {
> + sync_file->fence = fences[0];
> + } else {
> + array = fence_array_create(num_fences, fences,
> + fence_context_alloc(1), 1, false);
> + if (!array)
> + return -ENOMEM;
> +
> + sync_file->fence = (struct fence *)array;
I'd prefer sync_file->fence = &array->base;
> + }
> +
> + return 0;
> +}
> +
> +static struct fence **get_fences(struct sync_file *sync_file, int *num_fences)
> +{
> + if (fence_is_array(sync_file->fence)) {
> + struct fence_array *array = to_fence_array(sync_file->fence);
> + *num_fences = array->num_fences;
> + return array->fences;
> + } else {
> + *num_fences = 1;
> + return &sync_file->fence;
> + }
> +}
> +
> +static void add_fence(struct fence **fences, int *i, struct fence *fence)
> {
> - sync_file->cbs[*i].fence = fence;
> - sync_file->cbs[*i].sync_file = sync_file;
> + fences[*i] = fence;
>
> - if (!fence_add_callback(fence, &sync_file->cbs[*i].cb,
> - fence_check_cb_func)) {
> + if (!fence_is_signaled(fence)) {
> fence_get(fence);
> (*i)++;
> }
> @@ -147,16 +176,21 @@ static void sync_file_add_pt(struct sync_file *sync_file, int *i,
> static struct sync_file *sync_file_merge(const char *name, struct sync_file *a,
> struct sync_file *b)
> {
> - int num_fences = a->num_fences + b->num_fences;
> struct sync_file *sync_file;
> - int i, i_a, i_b;
> - unsigned long size = offsetof(struct sync_file, cbs[num_fences]);
> + struct fence **fences, **a_fences, **b_fences;
> + int i, i_a, i_b, num_fences, a_num_fences, b_num_fences;
>
> - sync_file = sync_file_alloc(size);
> + sync_file = sync_file_alloc();
> if (!sync_file)
> return NULL;
>
> - atomic_set(&sync_file->status, num_fences);
> + a_fences = get_fences(a, &a_num_fences);
> + b_fences = get_fences(b, &b_num_fences);
> + num_fences = a_num_fences + b_num_fences;
Do we need overflow paranoia here? Probably, as this is on a user ioctl
path.
> +
> + fences = kcalloc(num_fences, sizeof(*fences), GFP_KERNEL);
> + if (!fences)
> + goto err;
>
> /*
> * Assume sync_file a and b are both ordered and have no
> @@ -165,55 +199,66 @@ static struct sync_file *sync_file_merge(const char *name, struct sync_file *a,
> * If a sync_file can only be created with sync_file_merge
> * and sync_file_create, this is a reasonable assumption.
> */
> - for (i = i_a = i_b = 0; i_a < a->num_fences && i_b < b->num_fences; ) {
> - struct fence *pt_a = a->cbs[i_a].fence;
> - struct fence *pt_b = b->cbs[i_b].fence;
> + for (i = i_a = i_b = 0; i_a < a_num_fences && i_b < b_num_fences; ) {
> + struct fence *pt_a = a_fences[i_a];
> + struct fence *pt_b = b_fences[i_b];
>
> if (pt_a->context < pt_b->context) {
> - sync_file_add_pt(sync_file, &i, pt_a);
> + add_fence(fences, &i, pt_a);
>
> i_a++;
> } else if (pt_a->context > pt_b->context) {
> - sync_file_add_pt(sync_file, &i, pt_b);
> + add_fence(fences, &i, pt_b);
>
> i_b++;
> } else {
> if (pt_a->seqno - pt_b->seqno <= INT_MAX)
> - sync_file_add_pt(sync_file, &i, pt_a);
> + add_fence(fences, &i, pt_a);
> else
> - sync_file_add_pt(sync_file, &i, pt_b);
> + add_fence(fences, &i, pt_b);
>
> i_a++;
> i_b++;
> }
> }
>
> - for (; i_a < a->num_fences; i_a++)
> - sync_file_add_pt(sync_file, &i, a->cbs[i_a].fence);
> + for (; i_a < a_num_fences; i_a++)
> + add_fence(fences, &i, a_fences[i_a]);
> +
> + for (; i_b < b_num_fences; i_b++)
> + add_fence(fences, &i, b_fences[i_b]);
> +
> + if (num_fences > i) {
> + fences = krealloc(fences, i * sizeof(**fences),
Hmm, still too large, sizeof(*fences);
Looking pretty good. Putting fence-array to good use.
With those minor tweaks,
Reviewed-by: Chris Wilson <chris@...is-wilson.co.uk>
-Chris
--
Chris Wilson, Intel Open Source Technology Centre
Powered by blists - more mailing lists