Message-ID: <20221126083954.112164-1-xinhui.pan@amd.com>
Date:   Sat, 26 Nov 2022 16:39:54 +0800
From:   xinhui pan <xinhui.pan@....com>
To:     <amd-gfx@...ts.freedesktop.org>
CC:     <daniel@...ll.ch>, <dri-devel@...ts.freedesktop.org>,
        <linux-kernel@...r.kernel.org>, <matthew.auld@...el.com>,
        <christian.koenig@....com>, xinhui pan <xinhui.pan@....com>
Subject: [PATCH] drm: Optimise for continuous memory allocation

Currently drm-buddy does not take full advantage of continuous memory: two
free blocks that are physically adjacent but are not buddies of each other
are never handed out together.

Let's consider the scenario below.
order 1:    L		    R
order 0: LL	LR	RL	RR
For an order-1 allocation, the allocator can offer L, R, or the pair LR+RL.

For now, only the L or R case is handled for continuous memory allocation.
This patch implements the LR+RL case as well.
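
As an illustration of the lookup this patch adds, below is a minimal
standalone sketch. The toy types and names (toy_block, toy_right_neighbour)
are made up for this example and are not part of the drm_buddy API: starting
from a free block, climb the tree while the current node is a right child,
then take the parent's right subtree and walk down its left children; if that
lands on a free block of the same order, the two blocks are physically
contiguous even though they are not buddies of each other.

#include <stdbool.h>
#include <stddef.h>

/* Toy stand-in for a buddy block; illustration only. */
struct toy_block {
	struct toy_block *parent, *left, *right;
	int order;
	bool free;
	bool split;
};

/*
 * Given a free block 'blk' of the wanted order, return the block that
 * starts right where 'blk' ends, if it is also free and the same order.
 */
static struct toy_block *toy_right_neighbour(struct toy_block *blk, int order)
{
	struct toy_block *node = blk, *parent = blk->parent;

	/*
	 * Climb while we are the right child; afterwards 'blk' is the
	 * rightmost block underneath 'node'.
	 */
	while (parent && parent->right == node) {
		node = parent;
		parent = node->parent;
	}
	if (!parent)
		return NULL;	/* 'blk' reaches the end of the tree */

	/* The subtree to the right of 'node' begins where 'blk' ends. */
	node = parent->right;
	while (node->split)
		node = node->left;

	return (node->free && node->order == order) ? node : NULL;
}

In the LL/LR/RL/RR picture above, starting from LR this walk stops at L (a
left child), looks into R, and descends to RL, which gives the LR+RL pair.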

Signed-off-by: xinhui pan <xinhui.pan@....com>
---
 drivers/gpu/drm/drm_buddy.c | 56 ++++++++++++++++++++++++++++++++-----
 1 file changed, 49 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/drm_buddy.c b/drivers/gpu/drm/drm_buddy.c
index 11bb59399471..550af558342e 100644
--- a/drivers/gpu/drm/drm_buddy.c
+++ b/drivers/gpu/drm/drm_buddy.c
@@ -386,6 +386,37 @@ alloc_range_bias(struct drm_buddy *mm,
 	return ERR_PTR(err);
 }
 
+static struct drm_buddy_block *
+find_continuous_blocks(struct drm_buddy *mm,
+		       int order,
+		       struct drm_buddy_block **rn)
+{
+	struct list_head *head = &mm->free_list[order];
+	struct drm_buddy_block *node, *parent, *free_node;
+
+	list_for_each_entry(free_node, head, link) {
+		node = free_node;
+		parent = node->parent;
+		while (parent && parent->right == node) {
+			node = parent;
+			parent = node->parent;
+		}
+		if (!parent)
+			continue;
+
+		node = parent->right;
+		while (drm_buddy_block_is_split(node))
+			node = node->left;
+
+		if (drm_buddy_block_is_free(node) &&
+		    drm_buddy_block_order(node) == order) {
+			*rn = node;
+			return free_node;
+		}
+	}
+	return NULL;
+}
+
 static struct drm_buddy_block *
 get_maxblock(struct list_head *head)
 {
@@ -637,7 +668,7 @@ int drm_buddy_alloc_blocks(struct drm_buddy *mm,
 			   struct list_head *blocks,
 			   unsigned long flags)
 {
-	struct drm_buddy_block *block = NULL;
+	struct drm_buddy_block *block = NULL, *rblock = NULL;
 	unsigned int min_order, order;
 	unsigned long pages;
 	LIST_HEAD(allocated);
@@ -689,17 +720,28 @@ int drm_buddy_alloc_blocks(struct drm_buddy *mm,
 				break;
 
 			if (order-- == min_order) {
+				if (!(flags & DRM_BUDDY_RANGE_ALLOCATION) &&
+				    min_order != 0 && pages == BIT(order + 1)) {
+					block = find_continuous_blocks(mm,
+								       order,
+								       &rblock);
+					if (block)
+						break;
+				}
 				err = -ENOSPC;
 				goto err_free;
 			}
 		} while (1);
 
-		mark_allocated(block);
-		mm->avail -= drm_buddy_block_size(mm, block);
-		kmemleak_update_trace(block);
-		list_add_tail(&block->link, &allocated);
-
-		pages -= BIT(order);
+		do {
+			mark_allocated(block);
+			mm->avail -= drm_buddy_block_size(mm, block);
+			kmemleak_update_trace(block);
+			list_add_tail(&block->link, &allocated);
+			pages -= BIT(order);
+			block = rblock;
+			rblock = NULL;
+		} while (block);
 
 		if (!pages)
 			break;
-- 
2.34.1
