Message-ID: <CAF6AEGvVmV4fesd0MsSo-4WxSVqOFN-U+p5HOE2job6CeYbqTA@mail.gmail.com>
Date:   Tue, 8 Mar 2022 17:12:18 -0800
From:   Rob Clark <robdclark@...il.com>
To:     Dmitry Osipenko <dmitry.osipenko@...labora.com>
Cc:     David Airlie <airlied@...ux.ie>, Gerd Hoffmann <kraxel@...hat.com>,
        Gurchetan Singh <gurchetansingh@...omium.org>,
        Chia-I Wu <olvaffe@...il.com>, Daniel Vetter <daniel@...ll.ch>,
        Daniel Almeida <daniel.almeida@...labora.com>,
        Gert Wollny <gert.wollny@...labora.com>,
        Tomeu Vizoso <tomeu.vizoso@...labora.com>,
        Linux Kernel Mailing List <linux-kernel@...r.kernel.org>,
        "open list:VIRTIO GPU DRIVER" 
        <virtualization@...ts.linux-foundation.org>,
        Gustavo Padovan <gustavo.padovan@...labora.com>,
        dri-devel <dri-devel@...ts.freedesktop.org>,
        Dmitry Osipenko <digetx@...il.com>
Subject: Re: [PATCH v1 5/5] drm/virtio: Add memory shrinker

On Tue, Mar 8, 2022 at 5:17 AM Dmitry Osipenko
<dmitry.osipenko@...labora.com> wrote:
>
> Add a memory shrinker and a new madvise IOCTL to the VirtIO-GPU driver.
> Userspace (the BO cache manager of the Mesa driver) will mark BOs as
> "don't need" using the new IOCTL; this lets the shrinker purge the marked
> BOs when memory is tight, lowering memory pressure and preventing OOM
> kills.
>
> Signed-off-by: Daniel Almeida <daniel.almeida@...labora.com>
> Signed-off-by: Dmitry Osipenko <dmitry.osipenko@...labora.com>
> ---
>  drivers/gpu/drm/virtio/Makefile               |   3 +-
>  drivers/gpu/drm/virtio/virtgpu_drv.h          |  26 +++-
>  drivers/gpu/drm/virtio/virtgpu_gem.c          |  84 ++++++++++++
>  drivers/gpu/drm/virtio/virtgpu_gem_shrinker.c | 124 ++++++++++++++++++
>  drivers/gpu/drm/virtio/virtgpu_ioctl.c        |  37 ++++++
>  drivers/gpu/drm/virtio/virtgpu_kms.c          |  10 ++
>  drivers/gpu/drm/virtio/virtgpu_object.c       |   7 +
>  drivers/gpu/drm/virtio/virtgpu_plane.c        |  17 ++-
>  drivers/gpu/drm/virtio/virtgpu_vq.c           |  15 +++
>  include/uapi/drm/virtgpu_drm.h                |  14 ++
>  10 files changed, 333 insertions(+), 4 deletions(-)
>  create mode 100644 drivers/gpu/drm/virtio/virtgpu_gem_shrinker.c
>

[snip]
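
Since the uapi hunk is snipped in this mail, here is a rough sketch of
how Mesa's BO cache would be expected to use the new ioctl. The struct,
flag, and ioctl names below are hypothetical (modeled on msm's MADVISE
uapi), not necessarily what this patch actually defines:

    /*
     * Hypothetical usage sketch: DRM_IOCTL_VIRTGPU_MADVISE,
     * struct drm_virtgpu_madvise and the madv values are assumptions,
     * not this patch's actual uapi (which is snipped here).
     */
    #include <stdint.h>
    #include <xf86drm.h>          /* drmIoctl(), DRM_IOWR, DRM_COMMAND_BASE */

    struct drm_virtgpu_madvise {  /* assumed layout */
            uint32_t bo_handle;
            uint32_t madv;        /* 0 = WILLNEED, 1 = DONTNEED (assumed) */
            uint32_t retained;    /* out: 0 if the BO was already purged */
    };

    #define DRM_VIRTGPU_MADVISE       0x0b /* assumed ioctl number */
    #define DRM_IOCTL_VIRTGPU_MADVISE \
            DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_MADVISE, \
                     struct drm_virtgpu_madvise)

    /* Mark an idle BO as purgeable when it goes back into the BO cache. */
    static int bo_cache_mark_dontneed(int drm_fd, uint32_t bo_handle)
    {
            struct drm_virtgpu_madvise args = {
                    .bo_handle = bo_handle,
                    .madv = 1, /* DONTNEED (assumed value) */
            };

            return drmIoctl(drm_fd, DRM_IOCTL_VIRTGPU_MADVISE, &args);
    }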

> diff --git a/drivers/gpu/drm/virtio/virtgpu_gem_shrinker.c b/drivers/gpu/drm/virtio/virtgpu_gem_shrinker.c
> new file mode 100644
> index 000000000000..39eb9a3e7e4a
> --- /dev/null
> +++ b/drivers/gpu/drm/virtio/virtgpu_gem_shrinker.c
> @@ -0,0 +1,124 @@
> +// SPDX-License-Identifier: GPL-2.0-only
> +/*
> + * Copyright (C) 2022 Collabora Ltd.
> + */
> +
> +#include <linux/dma-mapping.h>
> +#include <linux/shmem_fs.h>
> +
> +#include "virtgpu_drv.h"
> +
> +static unsigned long
> +virtio_gpu_gem_shrinker_count_objects(struct shrinker *shrinker,
> +                                     struct shrink_control *sc)
> +{
> +       struct drm_gem_shmem_object *shmem;
> +       struct virtio_gpu_device *vgdev;
> +       unsigned long count = 0;
> +       bool empty = true;
> +
> +       vgdev = container_of(shrinker, struct virtio_gpu_device,
> +                            vgshrinker.shrinker);
> +
> +       if (!mutex_trylock(&vgdev->mm_lock))
> +               return 0;

One bit of advice from previously dealing with shrinkers in heavy
memory pressure situations (it turns out 4GB chromebooks can be under
pretty much *constant* memory pressure):

You *really* want to make shrinker->count_objects lockless, and to
minimize lock contention in shrinker->scan_objects. The problem is
that shrinking can end up running on all CPU cores in parallel, and
you want to avoid funneling all of that through a single lock as much
as possible.

See in particular:

25ed38b3ed26 ("drm/msm: Drop mm_lock in scan loop")
cc8a4d5a1bd8 ("drm/msm: Avoid mutex in shrinker_count()")
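
The gist of the second commit, sketched with hypothetical virtio-gpu
names (a sketch of the idea, not the actual msm code): keep an atomic
running count of purgeable pages, adjusted under the lock wherever a
BO's madvise state changes, so that count_objects() becomes a plain
atomic read that takes no locks at all:

    /* Builds on the structs from this patch; purgeable_pages is a
     * hypothetical new atomic_long_t field in vgdev->vgshrinker. */
    #include <linux/atomic.h>

    /* Called under mm_lock when a BO becomes DONTNEED... */
    static void virtio_gpu_mark_purgeable(struct virtio_gpu_device *vgdev,
                                          struct drm_gem_shmem_object *shmem)
    {
            atomic_long_add(shmem->base.size >> PAGE_SHIFT,
                            &vgdev->vgshrinker.purgeable_pages);
    }

    /* ...and when it is purged or marked WILLNEED again. */
    static void virtio_gpu_unmark_purgeable(struct virtio_gpu_device *vgdev,
                                            struct drm_gem_shmem_object *shmem)
    {
            atomic_long_sub(shmem->base.size >> PAGE_SHIFT,
                            &vgdev->vgshrinker.purgeable_pages);
    }

    static unsigned long
    virtio_gpu_gem_shrinker_count_objects(struct shrinker *shrinker,
                                          struct shrink_control *sc)
    {
            struct virtio_gpu_device *vgdev =
                    container_of(shrinker, struct virtio_gpu_device,
                                 vgshrinker.shrinker);
            unsigned long count =
                    atomic_long_read(&vgdev->vgshrinker.purgeable_pages);

            return count ?: SHRINK_EMPTY;
    }

scan_objects() can then keep taking the lock, but it only does real
work when there is actually something to reclaim, and count_objects()
stops being a contention point entirely.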

BR,
-R

> +       list_for_each_entry(shmem, &vgdev->vgshrinker.list, madv_list) {
> +               empty = false;
> +
> +               if (!mutex_trylock(&shmem->pages_lock))
> +                       continue;
> +
> +               if (drm_gem_shmem_is_purgeable(shmem))
> +                       count += shmem->base.size >> PAGE_SHIFT;
> +
> +               mutex_unlock(&shmem->pages_lock);
> +       }
> +
> +       mutex_unlock(&vgdev->mm_lock);
> +
> +       return empty ? SHRINK_EMPTY : count;
> +}
> +
> +static bool virtio_gpu_gem_shrinker_purge(struct virtio_gpu_device *vgdev,
> +                                         struct drm_gem_object *obj)
> +{
> +       struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
> +       struct drm_gem_shmem_object *shmem = &bo->base;
> +       int err;
> +
> +       if (!dma_resv_test_signaled(obj->resv, true) ||
> +           !drm_gem_shmem_is_purgeable(shmem) ||
> +           refcount_read(&bo->pin_count))
> +               return false;
> +
> +       /*
> +        * Release host's memory before guest's memory is gone to ensure that
> +        * host won't touch released memory of the guest.
> +        */
> +       err = virtio_gpu_gem_host_mem_release(bo);
> +       if (err)
> +               return false;
> +
> +       list_del_init(&shmem->madv_list);
> +       drm_gem_shmem_purge_locked(shmem);
> +
> +       return true;
> +}
> +
> +static unsigned long
> +virtio_gpu_gem_shrinker_scan_objects(struct shrinker *shrinker,
> +                                    struct shrink_control *sc)
> +{
> +       struct drm_gem_shmem_object *shmem, *tmp;
> +       struct virtio_gpu_device *vgdev;
> +       unsigned long freed = 0;
> +
> +       vgdev = container_of(shrinker, struct virtio_gpu_device,
> +                            vgshrinker.shrinker);
> +
> +       if (!mutex_trylock(&vgdev->mm_lock))
> +               return SHRINK_STOP;
> +
> +       list_for_each_entry_safe(shmem, tmp, &vgdev->vgshrinker.list, madv_list) {
> +               if (freed >= sc->nr_to_scan)
> +                       break;
> +
> +               if (!dma_resv_trylock(shmem->base.resv))
> +                       continue;
> +
> +               if (!mutex_trylock(&shmem->pages_lock))
> +                       goto resv_unlock;
> +
> +               if (virtio_gpu_gem_shrinker_purge(vgdev, &shmem->base))
> +                       freed += shmem->base.size >> PAGE_SHIFT;
> +
> +               mutex_unlock(&shmem->pages_lock);
> +resv_unlock:
> +               dma_resv_unlock(shmem->base.resv);
> +       }
> +
> +       mutex_unlock(&vgdev->mm_lock);
> +
> +       return freed;
> +}
> +
> +int virtio_gpu_gem_shrinker_init(struct virtio_gpu_device *vgdev)
> +{
> +       struct shrinker *shrinker = &vgdev->vgshrinker.shrinker;
> +
> +       shrinker->count_objects = virtio_gpu_gem_shrinker_count_objects;
> +       shrinker->scan_objects = virtio_gpu_gem_shrinker_scan_objects;
> +       shrinker->seeks = DEFAULT_SEEKS;
> +
> +       INIT_LIST_HEAD(&vgdev->vgshrinker.list);
> +
> +       return register_shrinker(shrinker);
> +}
> +
> +void virtio_gpu_gem_shrinker_fini(struct virtio_gpu_device *vgdev)
> +{
> +       struct shrinker *shrinker = &vgdev->vgshrinker.shrinker;
> +
> +       unregister_shrinker(shrinker);
> +}
