Message-ID: <CACvgo51719VsgNqiTO-RnZxXruRvtuMgJ1v5oaq4x5Lrniuaaw@mail.gmail.com>
Date: Tue, 11 Apr 2023 12:07:14 +0100
From: Emil Velikov <emil.l.velikov@...il.com>
To: Dmitry Osipenko <dmitry.osipenko@...labora.com>
Cc: David Airlie <airlied@...hat.com>,
Gerd Hoffmann <kraxel@...hat.com>,
Gurchetan Singh <gurchetansingh@...omium.org>,
Chia-I Wu <olvaffe@...il.com>, Daniel Vetter <daniel@...ll.ch>,
Rob Clark <robdclark@...il.com>,
Marek Olšák <maraeo@...il.com>,
Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer@....com>,
Emil Velikov <emil.velikov@...labora.com>,
kernel@...labora.com, linux-kernel@...r.kernel.org,
dri-devel@...ts.freedesktop.org,
virtualization@...ts.linux-foundation.org
Subject: Re: [PATCH v5 3/3] drm/virtio: Support sync objects
Hi Dmitry,
On Sun, 9 Apr 2023 at 13:40, Dmitry Osipenko
<dmitry.osipenko@...labora.com> wrote:
> +static void virtio_gpu_free_syncobjs(struct drm_syncobj **syncobjs,
> +				     uint32_t nr_syncobjs)
> +{
> +	uint32_t i = nr_syncobjs;
> +
> +	while (i--) {
> +		if (syncobjs[i])
> +			drm_syncobj_put(syncobjs[i]);
> +	}
> +
> +	kvfree(syncobjs);
> +}
> +
> +static void virtio_gpu_reset_syncobjs(struct drm_syncobj **syncobjs,
> +				      uint32_t nr_syncobjs)
> +{
> +	uint32_t i;
> +
> +	for (i = 0; i < nr_syncobjs; i++) {
> +		if (syncobjs[i])
> +			drm_syncobj_replace_fence(syncobjs[i], NULL);
> +	}
> +}
> +
Can I bribe you (with cookies) into dropping the NULL checks above?
They're dead code and rather misleading, IMHO.
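Purely illustrative and untested - assuming the arrays are always
fully populated by the time these helpers run (which is my reading of
the call sites), the reset helper could shrink to:

static void virtio_gpu_reset_syncobjs(struct drm_syncobj **syncobjs,
				      uint32_t nr_syncobjs)
{
	uint32_t i;

	/* every entry comes from a successful drm_syncobj_find() */
	for (i = 0; i < nr_syncobjs; i++)
		drm_syncobj_replace_fence(syncobjs[i], NULL);
}

Same idea for the free helper, of course.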
> +static void
> +virtio_gpu_free_post_deps(struct virtio_gpu_submit_post_dep *post_deps,
> +			  uint32_t nr_syncobjs)
> +{
> +	uint32_t i = nr_syncobjs;
> +
> +	while (i--) {
> +		kfree(post_deps[i].chain);
> +		drm_syncobj_put(post_deps[i].syncobj);
> +	}
> +
> +	kvfree(post_deps);
> +}
> +
> +static int virtio_gpu_parse_post_deps(struct virtio_gpu_submit *submit)
> +{
> +	struct drm_virtgpu_execbuffer *exbuf = submit->exbuf;
> +	struct drm_virtgpu_execbuffer_syncobj syncobj_desc;
> +	struct virtio_gpu_submit_post_dep *post_deps;
> +	u32 num_out_syncobjs = exbuf->num_out_syncobjs;
> +	size_t syncobj_stride = exbuf->syncobj_stride;
> +	int ret = 0, i;
> +
> +	if (!num_out_syncobjs)
> +		return 0;
> +
> +	post_deps = kvcalloc(num_out_syncobjs, sizeof(*post_deps), GFP_KERNEL);
> +	if (!post_deps)
> +		return -ENOMEM;
> +
> +	for (i = 0; i < num_out_syncobjs; i++) {
> +		uint64_t address = exbuf->out_syncobjs + i * syncobj_stride;
> +
For my own education: what are the specifics/requirements behind the
stride? Is there a use-case where the stride varies across the in/out
syncobjs?
Off the top of my head: userspace could have an array of larger
structs, each embedding a syncobj descriptor. By passing the stride,
the kernel can fetch/update them in place without caring about the
surrounding data.
Or perhaps there is another trick that userspace utilises the stride for?
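To make the question concrete, here is the kind of (hypothetical)
userspace layout I have in mind - the struct and field names below
are made up purely for illustration:

	/* Userspace keeps its own bookkeeping next to each descriptor and
	 * passes a stride equal to the size of the enclosing struct, so
	 * the kernel only reads/writes the embedded descriptor.
	 */
	struct app_out_fence {
		struct drm_virtgpu_execbuffer_syncobj desc;
		void *app_private;	/* never touched by the kernel */
	};

	struct app_out_fence fences[8];

	exbuf.out_syncobjs = (uintptr_t)&fences[0].desc;
	exbuf.syncobj_stride = sizeof(struct app_out_fence);
	exbuf.num_out_syncobjs = 8;

Just my guess at the motivation, of course.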
> +		if (copy_from_user(&syncobj_desc,
> +				   u64_to_user_ptr(address),
> +				   min(syncobj_stride, sizeof(syncobj_desc)))) {
> +			ret = -EFAULT;
> +			break;
> +		}
> +
We seem to be parsing garbage(?) stack data in the syncobj_stride <
sizeof(syncobj_desc) case.
Zeroing/resetting syncobj_desc on each iteration is one approach - be
that fully or just the unread tail. Alternatively, we could error out
when syncobj_stride < sizeof(syncobj_desc).
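Either of these (untested) should address it, I believe:

	/* option a: make sure the tail we don't copy is well defined */
	memset(&syncobj_desc, 0, sizeof(syncobj_desc));
	if (copy_from_user(&syncobj_desc, u64_to_user_ptr(address),
			   min(syncobj_stride, sizeof(syncobj_desc)))) {
		ret = -EFAULT;
		break;
	}

or

	/* option b: refuse descriptors smaller than what we understand */
	if (syncobj_stride < sizeof(syncobj_desc))
		return -EINVAL;

No strong preference on my end.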
> +		post_deps[i].point = syncobj_desc.point;
> +
> +		if (syncobj_desc.flags) {
> +			ret = -EINVAL;
> +			break;
> +		}
> +
> +		if (syncobj_desc.point) {
> +			post_deps[i].chain = dma_fence_chain_alloc();
> +			if (!post_deps[i].chain) {
> +				ret = -ENOMEM;
> +				break;
> +			}
> +		}
> +
> +		post_deps[i].syncobj = drm_syncobj_find(submit->file,
> +							syncobj_desc.handle);
> +		if (!post_deps[i].syncobj) {
> +			ret = -EINVAL;
I think we want a kfree(chain) here. Otherwise we'll leak it, right?
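I.e. roughly (untested; post_deps is zero-allocated, so the kfree() is
a no-op when no chain was allocated for this entry):

		if (!post_deps[i].syncobj) {
			kfree(post_deps[i].chain);
			ret = -EINVAL;
			break;
		}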
> +			break;
> +		}
> +	}
> +
> +	if (ret) {
> +		virtio_gpu_free_post_deps(post_deps, i);
> +		return ret;
> +	}
> +
> +	submit->num_out_syncobjs = num_out_syncobjs;
> +	submit->post_deps = post_deps;
> +
> +	return 0;
> +}
> +
With the two issues in virtio_gpu_parse_post_deps() addressed, the series is:
Reviewed-by: Emil Velikov <emil.velikov@...labora.com>
HTH
Emil