Message-ID: <20260206062557.3718801-5-honglei1.huang@amd.com>
Date: Fri, 6 Feb 2026 14:25:53 +0800
From: Honglei Huang <honglei1.huang@....com>
To: <Felix.Kuehling@....com>, <alexander.deucher@....com>,
	<christian.koenig@....com>, <Ray.Huang@....com>
CC: <dmitry.osipenko@...labora.com>, <Xinhui.Pan@....com>,
	<airlied@...il.com>, <daniel@...ll.ch>, <amd-gfx@...ts.freedesktop.org>,
	<dri-devel@...ts.freedesktop.org>, <linux-kernel@...r.kernel.org>,
	<linux-mm@...ck.org>, <akpm@...ux-foundation.org>, <honghuan@....com>
Subject: [PATCH v3 4/8] drm/amdkfd: Add batch MMU notifier support

From: Honglei Huang <honghuan@....com>

Implement MMU notifier callbacks for batch userptr allocations.

This adds:
- amdgpu_amdkfd_evict_userptr_batch(): handles MMU invalidation
  events for batch allocations, using the interval tree to identify
  the affected ranges
- amdgpu_amdkfd_invalidate_userptr_batch(): thin wrapper implementing
  the .invalidate callback
- amdgpu_amdkfd_hsa_batch_ops: MMU interval notifier ops structure
  (a registration sketch follows below)
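
For context only (not part of this patch, and the helper name below is
illustrative): the ops table is intended to be wired up with
mmu_interval_notifier_insert() over the VA span of a batch allocation,
roughly as follows. The kgd_mem fields batch_notifier, batch_va_min and
batch_va_max are taken from the hunk below; the surrounding helper is an
assumption about how registration would look elsewhere in the series.

	/*
	 * Hypothetical sketch: register the batch notifier covering the
	 * whole VA span [batch_va_min, batch_va_max) of a batch userptr
	 * allocation, so invalidations anywhere in that span reach
	 * amdgpu_amdkfd_hsa_batch_ops. Helper name is illustrative only.
	 */
	static int kfd_mem_register_batch_notifier(struct kgd_mem *mem,
						   struct mm_struct *mm)
	{
		return mmu_interval_notifier_insert(&mem->batch_notifier, mm,
				mem->batch_va_min,
				mem->batch_va_max - mem->batch_va_min,
				&amdgpu_amdkfd_hsa_batch_ops);
	}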

Signed-off-by: Honglei Huang <honghuan@....com>
---
 .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c  | 57 +++++++++++++++++++
 1 file changed, 57 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 3b7fc6d15..af6db20de 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -1143,6 +1143,63 @@ static bool mark_invalid_ranges(struct kgd_mem *mem,
 	return any_invalid;
 }
 
+static int amdgpu_amdkfd_evict_userptr_batch(struct mmu_interval_notifier *mni,
+					     const struct mmu_notifier_range *range,
+					     unsigned long cur_seq)
+{
+	struct kgd_mem *mem;
+	struct amdkfd_process_info *process_info;
+	int r = 0;
+
+	mem = container_of(mni, struct kgd_mem, batch_notifier);
+	process_info = mem->process_info;
+
+	if (READ_ONCE(process_info->block_mmu_notifications))
+		return 0;
+
+	if (!mark_invalid_ranges(mem, range->start, range->end)) {
+		pr_debug("Batch userptr: invalidation [0x%lx-0x%lx) does not affect any range\n",
+			 range->start, range->end);
+		return 0;
+	}
+
+	mutex_lock(&process_info->notifier_lock);
+	mmu_interval_set_seq(mni, cur_seq);
+
+	mem->invalid++;
+
+	if (++process_info->evicted_bos == 1) {
+		r = kgd2kfd_quiesce_mm(mni->mm,
+				       KFD_QUEUE_EVICTION_TRIGGER_USERPTR);
+
+		if (r && r != -ESRCH)
+			pr_err("Failed to quiesce KFD\n");
+
+		if (r != -ESRCH)
+			queue_delayed_work(system_freezable_wq,
+				&process_info->restore_userptr_work,
+				msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
+	}
+	mutex_unlock(&process_info->notifier_lock);
+
+	pr_debug("Batch userptr evicted: va_min=0x%llx va_max=0x%llx, inv_range=[0x%lx-0x%lx)\n",
+		 mem->batch_va_min, mem->batch_va_max, range->start, range->end);
+
+	return r;
+}
+
+static bool amdgpu_amdkfd_invalidate_userptr_batch(struct mmu_interval_notifier *mni,
+						   const struct mmu_notifier_range *range,
+						   unsigned long cur_seq)
+{
+	amdgpu_amdkfd_evict_userptr_batch(mni, range, cur_seq);
+	return true;
+}
+
+static const struct mmu_interval_notifier_ops amdgpu_amdkfd_hsa_batch_ops = {
+	.invalidate = amdgpu_amdkfd_invalidate_userptr_batch,
+};
+
 /* Reserving a BO and its page table BOs must happen atomically to
  * avoid deadlocks. Some operations update multiple VMs at once. Track
  * all the reservation info in a context structure. Optionally a sync
-- 
2.34.1

