[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <d1e5d883-1ade-4d68-a240-9ac0301d2baf@vivo.com>
Date: Fri, 6 Sep 2024 16:39:39 +0800
From: Huan Yang <link@...o.com>
To: "Kasireddy, Vivek" <vivek.kasireddy@...el.com>,
Sumit Semwal <sumit.semwal@...aro.org>,
Christian König <christian.koenig@....com>,
Gerd Hoffmann <kraxel@...hat.com>,
"linux-media@...r.kernel.org" <linux-media@...r.kernel.org>,
"dri-devel@...ts.freedesktop.org" <dri-devel@...ts.freedesktop.org>,
"linaro-mm-sig@...ts.linaro.org" <linaro-mm-sig@...ts.linaro.org>,
"linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>
Cc: "opensource.kernel@...o.com" <opensource.kernel@...o.com>
Subject: Re: [PATCH v5 6/7] udmabuf: remove udmabuf_folio
在 2024/9/6 16:22, Kasireddy, Vivek 写道:
> Hi Huan,
>
>> Subject: [PATCH v5 6/7] udmabuf: remove udmabuf_folio
>>
>> Currently, udmabuf handles folios by creating an unpin list to record
>> each folio it obtains, and unpinning them when released. To
>> maintain this approach, many data structures have been established.
>>
>> However, maintaining this type of data structure requires a significant
>> amount of memory and traversing the list is a substantial overhead,
>> which is not friendly to the CPU cache.
>>
>> Note that during creation, we already arrange the folio array in pin
>> order and set the offsets according to pgcnt.
>>
>> We actually don't need to use unpin_list to unpin during release.
>> Instead, we can iterate through the folios array during release and
>> unpin any folio that is different from the ones previously accessed.
>>
>> This not only saves the overhead of the udmabuf_folio data structure
>> but also makes array access more cache-friendly.
>>
>> Signed-off-by: Huan Yang <link@...o.com>
>> ---
>> drivers/dma-buf/udmabuf.c | 65 +++++++++++++++++----------------------
>> 1 file changed, 29 insertions(+), 36 deletions(-)
>>
>> diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c
>> index 254d9ec3d9f3..d449c1fd67a5 100644
>> --- a/drivers/dma-buf/udmabuf.c
>> +++ b/drivers/dma-buf/udmabuf.c
>> @@ -27,15 +27,21 @@ MODULE_PARM_DESC(size_limit_mb, "Max size of a
>> dmabuf, in megabytes. Default is
>> struct udmabuf {
>> pgoff_t pagecount;
>> struct folio **folios;
>> +
>> + /**
>> + * Unlike folios, pinned_folios is only used for unpinning.
>> + * So, nr_pinned is not the same as pagecount; pinned_folios
>> + * only records each folio that was pinned by udmabuf_create.
>> + * Note that, since a folio may be pinned multiple times, each folio
>> + * can be added to pinned_folios multiple times, depending on how
>> + * many times the folio was pinned during creation.
>> + */
>> + pgoff_t nr_pinned;
>> + struct folio **pinned_folios;
>> +
>> struct sg_table *sg;
>> struct miscdevice *device;
>> pgoff_t *offsets;
>> - struct list_head unpin_list;
>> -};
>> -
>> -struct udmabuf_folio {
>> - struct folio *folio;
>> - struct list_head list;
>> };
>>
>> static vm_fault_t udmabuf_vm_fault(struct vm_fault *vmf)
>> @@ -198,38 +204,18 @@ static void unmap_udmabuf(struct
>> dma_buf_attachment *at,
>> return put_sg_table(at->dev, sg, direction);
>> }
>>
>> -static void unpin_all_folios(struct list_head *unpin_list)
>> +static void unpin_all_folios(struct udmabuf *ubuf)
>> {
>> - struct udmabuf_folio *ubuf_folio;
>> + pgoff_t i;
>>
>> - while (!list_empty(unpin_list)) {
>> - ubuf_folio = list_first_entry(unpin_list,
>> - struct udmabuf_folio, list);
>> - unpin_folio(ubuf_folio->folio);
>> + for (i = 0; i < ubuf->nr_pinned; ++i)
>> + unpin_folio(ubuf->pinned_folios[i]);
>>
>> - list_del(&ubuf_folio->list);
>> - kfree(ubuf_folio);
>> - }
>> -}
>> -
>> -static int add_to_unpin_list(struct list_head *unpin_list,
>> - struct folio *folio)
>> -{
>> - struct udmabuf_folio *ubuf_folio;
>> -
>> - ubuf_folio = kzalloc(sizeof(*ubuf_folio), GFP_KERNEL);
>> - if (!ubuf_folio)
>> - return -ENOMEM;
>> -
>> - ubuf_folio->folio = folio;
>> - list_add_tail(&ubuf_folio->list, unpin_list);
>> - return 0;
>> + kvfree(ubuf->pinned_folios);
>> }
>>
>> static __always_inline int init_udmabuf(struct udmabuf *ubuf, pgoff_t
>> pgcnt)
>> {
>> - INIT_LIST_HEAD(&ubuf->unpin_list);
>> -
>> ubuf->folios = kvmalloc_array(pgcnt, sizeof(*ubuf->folios),
>> GFP_KERNEL);
>> if (!ubuf->folios)
>> return -ENOMEM;
>> @@ -238,12 +224,18 @@ static __always_inline int init_udmabuf(struct
>> udmabuf *ubuf, pgoff_t pgcnt)
>> if (!ubuf->offsets)
>> return -ENOMEM;
>>
>> + ubuf->pinned_folios = kvmalloc_array(pgcnt,
>> + sizeof(*ubuf->pinned_folios),
>> + GFP_KERNEL);
>> + if (!ubuf->pinned_folios)
>> + return -ENOMEM;
>> +
>> return 0;
>> }
>>
>> static __always_inline void deinit_udmabuf(struct udmabuf *ubuf)
>> {
>> - unpin_all_folios(&ubuf->unpin_list);
>> + unpin_all_folios(ubuf);
>> kvfree(ubuf->offsets);
>> kvfree(ubuf->folios);
>> }
>> @@ -353,7 +345,9 @@ static int export_udmabuf(struct udmabuf *ubuf,
>> static int udmabuf_pin_folios(struct udmabuf *ubuf, struct file *memfd,
>> loff_t start, loff_t size)
>> {
>> - pgoff_t pgoff, pgcnt, upgcnt = ubuf->pagecount;
>> + pgoff_t pgoff, pgcnt;
>> + pgoff_t upgcnt = ubuf->pagecount;
>> + pgoff_t nr_pinned = ubuf->nr_pinned;
> Reverse xmas style is preferred; please try to use that.
OK, I'll take care of it, and the other patches. :)
>
>> u32 cur_folio, cur_pgcnt;
>> struct folio **folios = NULL;
>> long nr_folios;
>> @@ -377,9 +371,7 @@ static int udmabuf_pin_folios(struct udmabuf *ubuf,
>> struct file *memfd,
>> pgoff_t subpgoff = pgoff;
>> size_t fsize = folio_size(folios[cur_folio]);
>>
>> - ret = add_to_unpin_list(&ubuf->unpin_list, folios[cur_folio]);
>> - if (ret < 0)
>> - goto err;
>> + ubuf->pinned_folios[nr_pinned++] = folios[cur_folio];
>>
>> for (; subpgoff < fsize; subpgoff += PAGE_SIZE) {
>> ubuf->folios[upgcnt] = folios[cur_folio];
>> @@ -399,6 +391,7 @@ static int udmabuf_pin_folios(struct udmabuf *ubuf,
>> struct file *memfd,
>> end:
>> err:
>> ubuf->pagecount = upgcnt;
>> + ubuf->nr_pinned = nr_pinned;
> Acked-by: Vivek Kasireddy <vivek.kasireddy@...el.com>
>
>> kvfree(folios);
>> return ret;
>> }
>> --
>> 2.45.2
Powered by blists - more mailing lists