Message-ID: <20251112072910.3716944-5-honglei1.huang@amd.com>
Date: Wed, 12 Nov 2025 15:29:09 +0800
From: Honglei Huang <honglei1.huang@....com>
To: <Felix.Kuehling@....com>, <alexander.deucher@....com>,
<christian.koenig@....com>, <Ray.Huang@....com>
CC: <dmitry.osipenko@...labora.com>, <Xinhui.Pan@....com>,
<airlied@...il.com>, <daniel@...ll.ch>, <amd-gfx@...ts.freedesktop.org>,
<dri-devel@...ts.freedesktop.org>, <linux-kernel@...r.kernel.org>,
<linux-mm@...ck.org>, <akpm@...ux-foundation.org>, <honghuang@....com>,
Honglei Huang <Honglei1.Huang@....com>
Subject: [PATCH 4/5] drm/amdkfd: Add support for pinned user pages in SVM ranges
From: Honglei Huang <Honglei1.Huang@....com>
Implement core functionality to pin and manage user pages for
non-contiguous SVM ranges:
1. Add svm_pin_user_ranges() function:
- Pin multiple non-contiguous user memory ranges
- Use pin_user_pages_fast() to lock pages in memory
- Store pinned pages in VMA's vm_private_data
- Set up custom VMA operations for fault handling
2. Add svm_range_get_mapped_pages() function:
- Optimized path for pre-mapped VMAs
- Retrieve pages directly from vm_private_data
- Bypass HMM for already-pinned pages
3. Implement svm_iovec_ops VMA operations:
- svm_iovec_fault(): Handle page faults by returning pre-pinned pages
- svm_iovec_close(): Cleanup and unpin pages on VMA close
4. Add is_map flag to struct svm_range:
- Track whether a range uses the pinned pages mechanism
- Enable conditional logic in DMA mapping and validation paths
5. Update DMA mapping logic:
- Skip special device page handling for pinned user pages
- Treat pinned pages as regular system memory for DMA
6. Modify validation logic:
- svm_range_is_valid() accepts mapped VMAs when is_map flag is set
- svm_range_validate_and_map() uses appropriate page retrieval path
This infrastructure enables efficient handling of scattered user
buffers without requiring the memory to be virtually contiguous,
supporting use cases such as registering multiple independent
malloc() allocations into the GPU address space.
Signed-off-by: Honglei Huang <Honglei1.Huang@....com>
---
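Note for reviewers: below is a rough sketch of the intended userspace flow,
to make the use case concrete. It is illustrative only: kfd_svm_register_ranges()
is a stand-in for the uAPI plumbing wired up elsewhere in this series, while
struct kfd_ioctl_svm_range and KFD_IOCTL_SVM_ATTR_MAPPED refer to the uAPI
additions from the earlier patches. The covering range is then registered via
the existing AMDKFD_IOC_SVM set-attribute path with KFD_IOCTL_SVM_ATTR_MAPPED.

	#include <stdint.h>
	#include <stdlib.h>
	#include <linux/kfd_ioctl.h>

	#define NBUFS	4
	#define BUF_SZ	(64UL * 4096)

	/* hypothetical wrapper around the SVM ioctl plumbing added in this series */
	int kfd_svm_register_ranges(struct kfd_ioctl_svm_range *ranges,
				    uint64_t nranges);

	static int register_scattered_buffers(void)
	{
		struct kfd_ioctl_svm_range ranges[NBUFS];
		int i;

		/* several independent, non-contiguous allocations */
		for (i = 0; i < NBUFS; i++) {
			void *p = aligned_alloc(4096, BUF_SZ);

			if (!p)
				return -1;
			ranges[i].addr = (uint64_t)(uintptr_t)p;
			ranges[i].size = BUF_SZ;
		}

		/* these land in svm_pin_user_ranges() on the kernel side */
		return kfd_svm_register_ranges(ranges, NBUFS);
	}
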
drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 232 ++++++++++++++++++++++++++-
drivers/gpu/drm/amd/amdkfd/kfd_svm.h | 3 +
2 files changed, 229 insertions(+), 6 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 31e500859ab0..fef0d147d938 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -179,7 +179,7 @@ svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange,
dma_unmap_page(dev, addr[i], PAGE_SIZE, dir);
page = hmm_pfn_to_page(hmm_pfns[i]);
- if (is_zone_device_page(page)) {
+ if (is_zone_device_page(page) && prange->svm_bo && !prange->is_map) {
struct amdgpu_device *bo_adev = prange->svm_bo->node->adev;
addr[i] = (hmm_pfns[i] << PAGE_SHIFT) +
@@ -682,6 +682,18 @@ static int svm_range_bo_validate(void *param, struct amdgpu_bo *bo)
return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
}
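+/* Return true if the attribute list contains KFD_IOCTL_SVM_ATTR_MAPPED. */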
+static bool
+svm_range_has_mapped_attr(uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs)
+{
+ uint32_t i;
+
+ for (i = 0; i < nattr; i++) {
+ if (attrs[i].type == KFD_IOCTL_SVM_ATTR_MAPPED)
+ return true;
+ }
+ return false;
+}
+
static int
svm_range_check_attr(struct kfd_process *p,
uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs)
@@ -713,6 +725,8 @@ svm_range_check_attr(struct kfd_process *p,
break;
case KFD_IOCTL_SVM_ATTR_GRANULARITY:
break;
+ case KFD_IOCTL_SVM_ATTR_MAPPED:
+ break;
default:
pr_debug("unknown attr type 0x%x\n", attrs[i].type);
return -EINVAL;
@@ -777,6 +791,9 @@ svm_range_apply_attrs(struct kfd_process *p, struct svm_range *prange,
case KFD_IOCTL_SVM_ATTR_GRANULARITY:
prange->granularity = min_t(uint32_t, attrs[i].value, 0x3F);
break;
+ case KFD_IOCTL_SVM_ATTR_MAPPED:
+ prange->is_map = true;
+ break;
default:
WARN_ONCE(1, "svm_range_check_attrs wasn't called?");
}
@@ -830,6 +847,8 @@ svm_range_is_same_attrs(struct kfd_process *p, struct svm_range *prange,
if (prange->granularity != attrs[i].value)
return false;
break;
+ case KFD_IOCTL_SVM_ATTR_MAPPED:
+ return false;
default:
WARN_ONCE(1, "svm_range_check_attrs wasn't called?");
}
@@ -1547,6 +1566,81 @@ static void *kfd_svm_page_owner(struct kfd_process *p, int32_t gpuidx)
return SVM_ADEV_PGMAP_OWNER(pdd->dev->adev);
}
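+/* VMAs set up by svm_pin_user_ranges() carry VM_IO | VM_PFNMAP. */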
+static bool svm_range_is_mapped_vma(struct vm_area_struct *vma)
+{
+ return vma && (vma->vm_flags & (VM_IO | VM_PFNMAP));
+}
+
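+/*
+ * Build an hmm_range for a VMA whose pages were already pinned by
+ * svm_pin_user_ranges(): take the pfns straight from the page array stashed
+ * in vma->vm_private_data instead of faulting the range in through HMM.
+ */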
+static int svm_range_get_mapped_pages(struct mmu_interval_notifier *notifier,
+ struct mm_struct *mm, struct page **pages,
+ uint64_t start, uint64_t npages,
+ struct hmm_range **phmm_range,
+ bool readonly, bool mmap_locked,
+ void *owner, struct vm_area_struct *vma)
+{
+ struct hmm_range *hmm_range;
+ unsigned long *pfns;
+
+ unsigned long vma_size;
+ struct page **vma_pages;
+ unsigned long vma_start_offset;
+ unsigned long i;
+ int r = 0;
+
+ hmm_range = kzalloc(sizeof(*hmm_range), GFP_KERNEL);
+ if (unlikely(!hmm_range))
+ return -ENOMEM;
+
+ pfns = kvmalloc_array(npages, sizeof(*pfns), GFP_KERNEL);
+ if (unlikely(!pfns)) {
+ r = -ENOMEM;
+ goto out_free_range;
+ }
+
+ hmm_range->notifier = notifier;
+ hmm_range->default_flags = HMM_PFN_REQ_FAULT;
+ if (!readonly)
+ hmm_range->default_flags |= HMM_PFN_REQ_WRITE;
+ hmm_range->hmm_pfns = pfns;
+ hmm_range->start = start;
+ hmm_range->end = start + npages * PAGE_SIZE;
+ hmm_range->dev_private_owner = owner;
+
+ hmm_range->notifier_seq = mmu_interval_read_begin(notifier);
+
+ if (likely(!mmap_locked))
+ mmap_read_lock(mm);
+
+ vma_pages = vma->vm_private_data;
+ vma_size = vma->vm_end - vma->vm_start;
+ vma_start_offset = (unsigned long)start - vma->vm_start;
+
+	if ((vma_size >> PAGE_SHIFT) < npages) {
+		pr_err("mapped vma npages 0x%lx < requested npages 0x%llx\n",
+		       vma_size >> PAGE_SHIFT, npages);
+		r = -EINVAL;
+		goto out_unlock;
+	}
+
+ for (i = 0; i < npages; i++)
+ pfns[i] = page_to_pfn(
+ vma_pages[(vma_start_offset >> PAGE_SHIFT) + i]);
+
+ if (likely(!mmap_locked))
+ mmap_read_unlock(mm);
+
+ for (i = 0; pages && i < npages; i++)
+ pages[i] = hmm_pfn_to_page(pfns[i]);
+
+ *phmm_range = hmm_range;
+
+ return 0;
+
+out_unlock:
+	if (likely(!mmap_locked))
+		mmap_read_unlock(mm);
+	kvfree(pfns);
+out_free_range:
+	kfree(hmm_range);
+
+ return r;
+}
+
/*
* Validation+GPU mapping with concurrent invalidation (MMU notifiers)
*
@@ -1674,7 +1768,15 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
next = min(vma->vm_end, end);
npages = (next - addr) >> PAGE_SHIFT;
WRITE_ONCE(p->svms.faulting_task, current);
-	r = amdgpu_hmm_range_get_pages(&prange->notifier, addr, npages,
-				       readonly, owner, NULL,
-				       &hmm_range);
+	if (svm_range_is_mapped_vma(vma)) {
+		r = svm_range_get_mapped_pages(&prange->notifier, mm, NULL,
+					       addr, npages, &hmm_range,
+					       readonly, true, owner, vma);
+		prange->is_map = true;
+	} else {
+		r = amdgpu_hmm_range_get_pages(&prange->notifier, addr, npages,
+					       readonly, owner, NULL,
+					       &hmm_range);
+	}
WRITE_ONCE(p->svms.faulting_task, NULL);
@@ -3269,9 +3371,9 @@ svm_range_check_vm(struct kfd_process *p, uint64_t start, uint64_t last,
* 0 - OK, otherwise error code
*/
static int
-svm_range_is_valid(struct kfd_process *p, uint64_t start, uint64_t size)
+svm_range_is_valid(struct kfd_process *p, uint64_t start, uint64_t size, bool mapped)
{
- const unsigned long device_vma = VM_IO | VM_PFNMAP | VM_MIXEDMAP;
+ const unsigned long device_vma = mapped ? 0 : VM_IO | VM_PFNMAP | VM_MIXEDMAP;
struct vm_area_struct *vma;
unsigned long end;
unsigned long start_unchg = start;
@@ -3525,6 +3629,7 @@ svm_range_set_attr(struct kfd_process *p, struct mm_struct *mm,
struct svm_range *next;
bool update_mapping = false;
bool flush_tlb;
+ bool if_mapped;
int r, ret = 0;
pr_debug("pasid 0x%x svms 0x%p [0x%llx 0x%llx] pages 0x%llx\n",
@@ -3540,7 +3645,9 @@ svm_range_set_attr(struct kfd_process *p, struct mm_struct *mm,
svm_range_list_lock_and_flush_work(svms, mm);
- r = svm_range_is_valid(p, start, size);
+ if_mapped = svm_range_has_mapped_attr(nattr, attrs);
+
+ r = svm_range_is_valid(p, start, size, if_mapped);
if (r) {
pr_debug("invalid range r=%d\n", r);
mmap_write_unlock(mm);
@@ -3679,7 +3786,7 @@ svm_range_get_attr(struct kfd_process *p, struct mm_struct *mm,
flush_work(&p->svms.deferred_list_work);
mmap_read_lock(mm);
- r = svm_range_is_valid(p, start, size);
+ r = svm_range_is_valid(p, start, size, false);
mmap_read_unlock(mm);
if (r) {
pr_debug("invalid range r=%d\n", r);
@@ -4153,3 +4260,116 @@ svm_ioctl(struct kfd_process *p, enum kfd_ioctl_svm_op op, uint64_t start,
return r;
}
+
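+/* VMA close: unpin and free the page array attached by svm_pin_user_ranges(). */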
+static void svm_iovec_close(struct vm_area_struct *vma)
+{
+ struct page **pages = vma->vm_private_data;
+ uint32_t npages;
+
+ if (!pages)
+ return;
+
+ npages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+
+ unpin_user_pages_dirty_lock(pages, npages, false);
+ pr_debug("svm_iovec_close, unpin pages, start: 0x%lx, npages: 0x%x\n",
+ vma->vm_start, npages);
+
+ kvfree(pages);
+ vma->vm_private_data = NULL;
+}
+
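+/* Fault handler: resolve faults from the pre-pinned page array. */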
+static vm_fault_t svm_iovec_fault(struct vm_fault *vmf)
+{
+	struct vm_area_struct *vma = vmf->vma;
+	struct page **pages = vma->vm_private_data;
+
+	if ((vmf->pgoff << PAGE_SHIFT) >= (vma->vm_end - vma->vm_start))
+		return VM_FAULT_SIGBUS;
+
+	if (!pages)
+		return VM_FAULT_SIGBUS;
+
+	/* hand back the pre-pinned page with its own reference */
+	get_page(pages[vmf->pgoff]);
+	vmf->page = pages[vmf->pgoff];
+
+	return 0;
+}
+
+static const struct vm_operations_struct svm_iovec_ops = {
+ .close = svm_iovec_close,
+ .fault = svm_iovec_fault,
+};
+
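+/**
+ * svm_pin_user_ranges - pin scattered user ranges behind one SVM mapping
+ * @p: kfd_process the ranges belong to
+ * @start: start address of the covering VMA
+ * @size: size in bytes of the covering range
+ * @ranges: array of user address ranges to pin
+ * @nranges: number of entries in @ranges
+ *
+ * Pin every range with pin_user_pages_fast(), store the page array in the
+ * VMA's vm_private_data and install svm_iovec_ops so faults and VMA close
+ * are served from the pinned pages.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */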
+int svm_pin_user_ranges(struct kfd_process *p, uint64_t start, uint64_t size,
+ struct kfd_ioctl_svm_range *ranges, uint64_t nranges)
+{
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma;
+ struct page **pages = NULL, **cur_page;
+	unsigned long vma_size;
+	uint32_t npages = 0, pinned_pages = 0;
+ int i, ret;
+
+ mmap_read_lock(mm);
+ vma = find_vma(mm, start);
+ if (!vma) {
+ pr_err("failed to find vma, start: 0x%llx\n", start);
+ mmap_read_unlock(mm);
+ return -EINVAL;
+ }
+ mmap_read_unlock(mm);
+
+ if (vma->vm_ops == &svm_iovec_ops)
+ return 0;
+
+ vma_size = vma->vm_end - vma->vm_start;
+	if (size > vma_size) {
+		pr_err("vma size 0x%lx < target size 0x%llx\n", vma_size, size);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < nranges; i++)
+		npages += ranges[i].size >> PAGE_SHIFT;
+
+ pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
+ if (!pages) {
+ pr_err("failed to allocate pages\n");
+ ret = -ENOMEM;
+ goto failed_free;
+ }
+
+ cur_page = pages;
+
+ for (i = 0; i < nranges; i++) {
+ ret = pin_user_pages_fast(ranges[i].addr,
+ (ranges[i].size >> PAGE_SHIFT),
+ FOLL_WRITE | FOLL_FORCE, cur_page);
+		if (ret < 0 || ret != (ranges[i].size >> PAGE_SHIFT)) {
+			pr_err("failed to pin user pages, addr: 0x%llx, size: 0x%llx\n",
+			       ranges[i].addr, ranges[i].size);
+			/* count any partial pin so failed_free unpins it */
+			if (ret > 0)
+				pinned_pages += ret;
+			ret = -EFAULT;
+			goto failed_free;
+		}
+
+		cur_page += ranges[i].size >> PAGE_SHIFT;
+		pinned_pages += ranges[i].size >> PAGE_SHIFT;
+ }
+
+ mmap_write_lock(mm);
+ vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
+ vma->vm_private_data = pages;
+ vma->vm_ops = &svm_iovec_ops;
+ mmap_write_unlock(mm);
+ return 0;
+
+failed_free:
+ if (pages) {
+ unpin_user_pages_dirty_lock(pages, pinned_pages, false);
+ kvfree(pages);
+ }
+ return ret;
+}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
index 70c1776611c4..ebaa10fce8c1 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
@@ -137,6 +137,7 @@ struct svm_range {
DECLARE_BITMAP(bitmap_access, MAX_GPU_INSTANCE);
DECLARE_BITMAP(bitmap_aip, MAX_GPU_INSTANCE);
bool mapped_to_gpu;
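+	/* range is backed by pinned user pages (KFD_IOCTL_SVM_ATTR_MAPPED) */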
+ bool is_map;
};
static inline void svm_range_lock(struct svm_range *prange)
@@ -207,6 +208,8 @@ void svm_range_bo_unref_async(struct svm_range_bo *svm_bo);
void svm_range_set_max_pages(struct amdgpu_device *adev);
int svm_range_switch_xnack_reserve_mem(struct kfd_process *p, bool xnack_enabled);
+int svm_pin_user_ranges(struct kfd_process *p, uint64_t start, uint64_t size,
+ struct kfd_ioctl_svm_range *ranges, uint64_t nranges);
#else
--
2.34.1