Message-ID: <ed9142da-ce10-7df2-8a85-ba9ad0c26551@suse.de>
Date: Wed, 11 Dec 2019 11:07:25 +0100
From: Thomas Zimmermann <tzimmermann@...e.de>
To: Gerd Hoffmann <kraxel@...hat.com>, dri-devel@...ts.freedesktop.org
Cc: David Airlie <airlied@...ux.ie>,
open list <linux-kernel@...r.kernel.org>,
gurchetansingh@...omium.org
Subject: Re: [PATCH v2 1/2] drm/shmem: add support for per object caching attributes
On 11.12.19 10:58, Thomas Zimmermann wrote:
> Hi Gerd
>
> On 11.12.19 09:18, Gerd Hoffmann wrote:
>> Add a caching field to drm_gem_shmem_object to specify the caching
>> attributes for mappings. Add a helper function to tweak the pgprot
>> accordingly. Switch the vmap and mmap functions to the new helper.
>>
>> Set caching to write-combine when creating the object so behavior
>> doesn't change by default. Drivers can override that later if
>> needed.
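>>
>> A driver that wants cached mappings can, for example, simply set
>>
>>     shmem->caching = DRM_GEM_SHMEM_CACHED;
>>
>> on the object after drm_gem_shmem_create() returns.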
>>
>> Signed-off-by: Gerd Hoffmann <kraxel@...hat.com>
>
> If you want to merge this patch, you have my
>
> Reviewed-by: Thomas Zimmermann <tzimmermann@...e.de>
>
> Please see my comment below.
>
>> ---
>> include/drm/drm_gem_shmem_helper.h | 12 ++++++++++++
>> drivers/gpu/drm/drm_gem_shmem_helper.c | 24 +++++++++++++++++++++---
>> 2 files changed, 33 insertions(+), 3 deletions(-)
>>
>> diff --git a/include/drm/drm_gem_shmem_helper.h b/include/drm/drm_gem_shmem_helper.h
>> index 6748379a0b44..9d6e02c6205f 100644
>> --- a/include/drm/drm_gem_shmem_helper.h
>> +++ b/include/drm/drm_gem_shmem_helper.h
>> @@ -17,6 +17,11 @@ struct drm_mode_create_dumb;
>> struct drm_printer;
>> struct sg_table;
>>
>> +enum drm_gem_shmem_caching {
>> + DRM_GEM_SHMEM_CACHED = 1,
>> + DRM_GEM_SHMEM_WC,
>> +};
>> +
>> /**
>> * struct drm_gem_shmem_object - GEM object backed by shmem
>> */
>> @@ -83,6 +88,11 @@ struct drm_gem_shmem_object {
>> * The address are un-mapped when the count reaches zero.
>> */
>> unsigned int vmap_use_count;
>> +
>> + /**
>> + * @caching: caching attributes for mappings.
>> + */
>> + enum drm_gem_shmem_caching caching;
>> };
>>
>> #define to_drm_gem_shmem_obj(obj) \
>> @@ -130,6 +140,8 @@ drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
>>
>> struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_object *obj);
>>
>> +pgprot_t drm_gem_shmem_caching(struct drm_gem_shmem_object *shmem, pgprot_t prot);
>> +
>> /**
>> * DRM_GEM_SHMEM_DRIVER_OPS - Default shmem GEM operations
>> *
>> diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
>> index a421a2eed48a..5bb94e130a50 100644
>> --- a/drivers/gpu/drm/drm_gem_shmem_helper.c
>> +++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
>> @@ -76,6 +76,7 @@ struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t
>> mutex_init(&shmem->pages_lock);
>> mutex_init(&shmem->vmap_lock);
>> INIT_LIST_HEAD(&shmem->madv_list);
>> + shmem->caching = DRM_GEM_SHMEM_WC;
>>
>> /*
>> * Our buffers are kept pinned, so allocating them
>> @@ -256,9 +257,11 @@ static void *drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem)
>>
>> if (obj->import_attach)
>> shmem->vaddr = dma_buf_vmap(obj->import_attach->dmabuf);
>> - else
>> + else {
>> + pgprot_t prot = drm_gem_shmem_caching(shmem, PAGE_KERNEL);
>> shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
>> - VM_MAP, pgprot_writecombine(PAGE_KERNEL));
>> + VM_MAP, prot);
>> + }
>>
>> if (!shmem->vaddr) {
>> DRM_DEBUG_KMS("Failed to vmap pages\n");
>> @@ -540,7 +543,8 @@ int drm_gem_shmem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
>> }
>>
>> vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND;
>> - vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
>> + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
>> + vma->vm_page_prot = drm_gem_shmem_caching(shmem, vma->vm_page_prot);
>> vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
>> vma->vm_ops = &drm_gem_shmem_vm_ops;
>>
>> @@ -683,3 +687,17 @@ drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
>> return ERR_PTR(ret);
>> }
>> EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_sg_table);
>> +
>> +pgprot_t drm_gem_shmem_caching(struct drm_gem_shmem_object *shmem, pgprot_t prot)
>> +{
>> + switch (shmem->caching) {
>> + case DRM_GEM_SHMEM_CACHED:
>> + return prot;
>> + case DRM_GEM_SHMEM_WC:
>> + return pgprot_writecombine(prot);
>> + default:
>> + WARN_ON_ONCE(1);
>> + return prot;
>> + }
>> +}
>> +EXPORT_SYMBOL_GPL(drm_gem_shmem_caching);
>
> Two reasons why I'd reconsider this design.
>
> I don't like switch statements near the bottom of the call graph. The
> code ends up with default-case warnings, such as this one.
>
> Udl has different caching flags for imported and 'native' buffers. This
> would require a new constant and additional code here.
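>
> E.g. an additional case in the switch, something like this (made-up
> constant name):
>
>     case DRM_GEM_SHMEM_CACHED_IMPORTED_WC_NATIVE:
>             if (shmem->base.import_attach)
>                     return prot;
>             return pgprot_writecombine(prot);
>
> That doesn't scale well.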
>
> What do you think about turning this function into a callback in struct
> shmem_funcs? The default implementation would be WC; virtio would use
> CACHED. The individual implementations could still be located in the
> shmem code, and udl would later provide its own, as sketched below.
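>
> To illustrate, roughly (untested sketch; the struct and hook names are
> made up):
>
>     struct drm_gem_shmem_funcs {
>             /* return the pgprot to use for this object's mappings */
>             pgprot_t (*mmap_prot)(struct drm_gem_shmem_object *shmem,
>                                   pgprot_t prot);
>     };
>
>     /* default implementation: write-combined, as today */
>     static pgprot_t drm_gem_shmem_mmap_prot_wc(struct drm_gem_shmem_object *shmem,
>                                                pgprot_t prot)
>     {
>             return pgprot_writecombine(prot);
>     }
>
> Virtio would install a variant that returns prot unchanged, and udl
> could later check the object's import_attach to pick per-buffer
> attributes.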
On second thought, all this might be over-engineered, and v1 of the
patchset was the correct approach. You can add my
Acked-by: Thomas Zimmermann <tzimmermann@...e.de>
if you prefer to merge v1.
>
> Best regards
> Thomas
>
>>
>
--
Thomas Zimmermann
Graphics Driver Developer
SUSE Software Solutions Germany GmbH
Maxfeldstr. 5, 90409 Nürnberg, Germany
(HRB 36809, AG Nürnberg)
Geschäftsführer: Felix Imendörffer