Message-ID: <1f356be5-2535-8f76-f33f-540feb3a72ea@shipmail.org>
Date: Wed, 27 Nov 2019 13:30:14 +0100
From: Thomas Hellström (VMware) <thomas_os@...pmail.org>
To: Christian König <christian.koenig@....com>,
dri-devel@...ts.freedesktop.org, linux-mm@...ck.org,
linux-kernel@...r.kernel.org, linux-graphics-maintainer@...are.com
Cc: Thomas Hellstrom <thellstrom@...are.com>,
Andrew Morton <akpm@...ux-foundation.org>,
Michal Hocko <mhocko@...e.com>,
"Matthew Wilcox (Oracle)" <willy@...radead.org>,
"Kirill A. Shutemov" <kirill.shutemov@...ux.intel.com>,
Ralph Campbell <rcampbell@...dia.com>,
Jérôme Glisse <jglisse@...hat.com>
Subject: Re: [RFC PATCH 6/7] drm/ttm: Introduce a huge page aligning TTM range manager.
On 11/27/19 11:05 AM, Christian König wrote:
> I don't see the advantage over just increasing the alignment
> requirements on the driver side?
The advantage is that we don't fail the space allocation if we can't
match the huge-page alignment. Instead we fall back to a lower alignment,
as long as it is still compatible with the alignment the GPU requires.
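
To make that concrete, here is a condensed sketch of the PMD path in the
patch below (locking and the optional PUD step left out):

    /* Opportunistically try a huge-page-aligned start first ... */
    ret = -ENOSPC;
    align_pages = HPAGE_PMD_SIZE >> PAGE_SHIFT;
    if (mem->num_pages >= align_pages)
            ret = ttm_bo_insert_aligned(mm, node, align_pages,
                                        place, mem, lpfn, mode);

    /* ... and only fall back to the GPU-required alignment if that fails. */
    if (ret)
            ret = drm_mm_insert_node_in_range(mm, node, mem->num_pages,
                                              mem->page_alignment, 0,
                                              place->fpfn, lpfn, mode);

Simply raising mem->page_alignment on the driver side would instead make
that final drm_mm_insert_node_in_range() call return -ENOSPC as soon as no
huge-page-aligned hole is left, even when a hole with the smaller
GPU-required alignment would still fit.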
Thanks,
/Thomas
>
> That would be a one-liner if I'm not completely mistaken.
>
> Regards,
> Christian.
>
> On 27.11.19 at 09:31, Thomas Hellström (VMware) wrote:
>> From: Thomas Hellstrom <thellstrom@...are.com>
>>
>> Using huge page-table entries requires that the start of a buffer object
>> is huge-page-size aligned. So introduce a ttm_bo_man_get_node_huge()
>> function that attempts to accomplish this for allocations that are larger
>> than the huge page size, and provide a new range-manager instance that
>> uses that function.
>>
>> Cc: Andrew Morton <akpm@...ux-foundation.org>
>> Cc: Michal Hocko <mhocko@...e.com>
>> Cc: "Matthew Wilcox (Oracle)" <willy@...radead.org>
>> Cc: "Kirill A. Shutemov" <kirill.shutemov@...ux.intel.com>
>> Cc: Ralph Campbell <rcampbell@...dia.com>
>> Cc: "Jérôme Glisse" <jglisse@...hat.com>
>> Cc: "Christian König" <christian.koenig@....com>
>> Signed-off-by: Thomas Hellstrom <thellstrom@...are.com>
>> ---
>> drivers/gpu/drm/ttm/ttm_bo_manager.c | 92 ++++++++++++++++++++++++++++
>> include/drm/ttm/ttm_bo_driver.h | 1 +
>> 2 files changed, 93 insertions(+)
>>
>> diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c
>> index 18d3debcc949..26aa1a2ae7f1 100644
>> --- a/drivers/gpu/drm/ttm/ttm_bo_manager.c
>> +++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c
>> @@ -89,6 +89,89 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
>>          return 0;
>>  }
>> +#ifdef CONFIG_TRANSPARENT_HUGEPAGE
>> +static int ttm_bo_insert_aligned(struct drm_mm *mm, struct drm_mm_node *node,
>> +                                 unsigned long align_pages,
>> +                                 const struct ttm_place *place,
>> +                                 struct ttm_mem_reg *mem,
>> +                                 unsigned long lpfn,
>> +                                 enum drm_mm_insert_mode mode)
>> +{
>> +        if (align_pages >= mem->page_alignment &&
>> +            (!mem->page_alignment || align_pages % mem->page_alignment == 0)) {
>> +                return drm_mm_insert_node_in_range(mm, node,
>> +                                                   mem->num_pages,
>> +                                                   align_pages, 0,
>> +                                                   place->fpfn, lpfn, mode);
>> +        }
>> +
>> +        return -ENOSPC;
>> +}
>> +
>> +static int ttm_bo_man_get_node_huge(struct ttm_mem_type_manager *man,
>> +                                    struct ttm_buffer_object *bo,
>> +                                    const struct ttm_place *place,
>> +                                    struct ttm_mem_reg *mem)
>> +{
>> +        struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
>> +        struct drm_mm *mm = &rman->mm;
>> +        struct drm_mm_node *node;
>> +        unsigned long align_pages;
>> +        unsigned long lpfn;
>> +        enum drm_mm_insert_mode mode = DRM_MM_INSERT_BEST;
>> +        int ret;
>> +
>> +        node = kzalloc(sizeof(*node), GFP_KERNEL);
>> +        if (!node)
>> +                return -ENOMEM;
>> +
>> +        lpfn = place->lpfn;
>> +        if (!lpfn)
>> +                lpfn = man->size;
>> +
>> +        mode = DRM_MM_INSERT_BEST;
>> +        if (place->flags & TTM_PL_FLAG_TOPDOWN)
>> +                mode = DRM_MM_INSERT_HIGH;
>> +
>> +        spin_lock(&rman->lock);
>> +        if (IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)) {
>> +                align_pages = (HPAGE_PUD_SIZE >> PAGE_SHIFT);
>> +                if (mem->num_pages >= align_pages) {
>> +                        ret = ttm_bo_insert_aligned(mm, node, align_pages,
>> +                                                    place, mem, lpfn, mode);
>> +                        if (!ret)
>> +                                goto found_unlock;
>> +                }
>> +        }
>> +
>> +        align_pages = (HPAGE_PMD_SIZE >> PAGE_SHIFT);
>> +        if (mem->num_pages >= align_pages) {
>> +                ret = ttm_bo_insert_aligned(mm, node, align_pages, place, mem,
>> +                                            lpfn, mode);
>> +                if (!ret)
>> +                        goto found_unlock;
>> +        }
>> +
>> +        ret = drm_mm_insert_node_in_range(mm, node, mem->num_pages,
>> +                                          mem->page_alignment, 0,
>> +                                          place->fpfn, lpfn, mode);
>> +found_unlock:
>> +        spin_unlock(&rman->lock);
>> +
>> +        if (unlikely(ret)) {
>> +                kfree(node);
>> +        } else {
>> +                mem->mm_node = node;
>> +                mem->start = node->start;
>> +        }
>> +
>> +        return 0;
>> +}
>> +#else
>> +#define ttm_bo_man_get_node_huge ttm_bo_man_get_node
>> +#endif
>> +
>> +
>>  static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
>>                                  struct ttm_mem_reg *mem)
>>  {
>> @@ -154,3 +237,12 @@ const struct ttm_mem_type_manager_func ttm_bo_manager_func = {
>>          .debug = ttm_bo_man_debug
>>  };
>>  EXPORT_SYMBOL(ttm_bo_manager_func);
>> +
>> +const struct ttm_mem_type_manager_func ttm_bo_manager_huge_func = {
>> +        .init = ttm_bo_man_init,
>> +        .takedown = ttm_bo_man_takedown,
>> +        .get_node = ttm_bo_man_get_node_huge,
>> +        .put_node = ttm_bo_man_put_node,
>> +        .debug = ttm_bo_man_debug
>> +};
>> +EXPORT_SYMBOL(ttm_bo_manager_huge_func);
>> diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
>> index cac7a8a0825a..868bd0d4be6a 100644
>> --- a/include/drm/ttm/ttm_bo_driver.h
>> +++ b/include/drm/ttm/ttm_bo_driver.h
>> @@ -888,5 +888,6 @@ int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo);
>>  pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp);
>>  extern const struct ttm_mem_type_manager_func ttm_bo_manager_func;
>> +extern const struct ttm_mem_type_manager_func ttm_bo_manager_huge_func;
>>  #endif
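
(As a usage note, not part of this patch: a driver would opt in by pointing
a memory type at the new manager instance in its init_mem_type() callback,
along the lines of the sketch below. VRAM is only an example here.)

    case TTM_PL_VRAM:
            /* Use the huge page aligning range manager instead of the plain one. */
            man->func = &ttm_bo_manager_huge_func;
            break;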