Message-Id: <20191203132239.5910-6-thomas_os@shipmail.org>
Date: Tue, 3 Dec 2019 14:22:36 +0100
From: Thomas Hellström (VMware)
<thomas_os@...pmail.org>
To: linux-mm@...ck.org, linux-kernel@...r.kernel.org,
dri-devel@...ts.freedesktop.org
Cc: pv-drivers@...are.com, linux-graphics-maintainer@...are.com,
Thomas Hellstrom <thellstrom@...are.com>,
Andrew Morton <akpm@...ux-foundation.org>,
Michal Hocko <mhocko@...e.com>,
"Matthew Wilcox (Oracle)" <willy@...radead.org>,
"Kirill A. Shutemov" <kirill.shutemov@...ux.intel.com>,
Ralph Campbell <rcampbell@...dia.com>,
Jérôme Glisse <jglisse@...hat.com>,
Christian König <christian.koenig@....com>
Subject: [PATCH 5/8] drm/vmwgfx: Support huge page faults
From: Thomas Hellstrom <thellstrom@...are.com>
With vmwgfx dirty-tracking we need a specialized huge_fault callback:
write faults must fall back to PTE level while dirty-tracking is
enabled, and huge faults need the same range and resource-clean checks
as the ordinary fault path. Implement such a callback and hook it up.
Cc: Andrew Morton <akpm@...ux-foundation.org>
Cc: Michal Hocko <mhocko@...e.com>
Cc: "Matthew Wilcox (Oracle)" <willy@...radead.org>
Cc: "Kirill A. Shutemov" <kirill.shutemov@...ux.intel.com>
Cc: Ralph Campbell <rcampbell@...dia.com>
Cc: "Jérôme Glisse" <jglisse@...hat.com>
Cc: "Christian König" <christian.koenig@....com>
Signed-off-by: Thomas Hellstrom <thellstrom@...are.com>
---
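For context, not part of the patch itself: when the core mm attempts a
PMD-sized fault it dispatches to the driver's huge_fault callback
roughly as below. This is a trimmed sketch of the mm/memory.c path
around this kernel version, with THP eligibility checks and error
handling omitted.

static vm_fault_t create_huge_pmd(struct vm_fault *vmf)
{
	if (vma_is_anonymous(vmf->vma))
		return do_huge_pmd_anonymous_page(vmf);
	if (vmf->vma->vm_ops->huge_fault)
		/* vmwgfx ends up in vmw_bo_vm_huge_fault(vmf, PE_SIZE_PMD) */
		return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PMD);
	return VM_FAULT_FALLBACK;
}

On VM_FAULT_FALLBACK, __handle_mm_fault() falls through to
handle_pte_fault(), so .fault and .page_mkwrite run per base page.
That is why vmw_bo_vm_huge_fault() below returns VM_FAULT_FALLBACK for
write faults whenever dirty-tracking is active: it keeps write
dirty-tracking at PTE granularity.
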
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 2 +
drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c | 66 +++++++++++++++++++++-
drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c | 1 +
3 files changed, 68 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index a31e726d6d71..8656a97448c3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -1428,6 +1428,8 @@ void vmw_bo_dirty_unmap(struct vmw_buffer_object *vbo,
 			pgoff_t start, pgoff_t end);
 vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf);
 vm_fault_t vmw_bo_vm_mkwrite(struct vm_fault *vmf);
+vm_fault_t vmw_bo_vm_huge_fault(struct vm_fault *vmf,
+				enum page_entry_size pe_size);
 /**
  * VMW_DEBUG_KMS - Debug output for kernel mode-setting
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
index 17a5dca7b921..6f76a97ad969 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
@@ -473,7 +473,7 @@ vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf)
 	 * a lot of unnecessary write faults.
 	 */
 	if (vbo->dirty && vbo->dirty->method == VMW_BO_DIRTY_MKWRITE)
-		prot = vma->vm_page_prot;
+		prot = vm_get_page_prot(vma->vm_flags & ~VM_SHARED);
 	else
 		prot = vm_get_page_prot(vma->vm_flags);
@@ -486,3 +486,67 @@ vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf)
 	return ret;
 }
+
+vm_fault_t vmw_bo_vm_huge_fault(struct vm_fault *vmf,
+				enum page_entry_size pe_size)
+{
+	struct vm_area_struct *vma = vmf->vma;
+	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
+	    vma->vm_private_data;
+	struct vmw_buffer_object *vbo =
+		container_of(bo, struct vmw_buffer_object, base);
+	pgprot_t prot;
+	vm_fault_t ret;
+	pgoff_t fault_page_size;
+
+	switch (pe_size) {
+	case PE_SIZE_PMD:
+		fault_page_size = HPAGE_PMD_SIZE >> PAGE_SHIFT;
+		break;
+	case PE_SIZE_PUD:
+		fault_page_size = HPAGE_PUD_SIZE >> PAGE_SHIFT;
+		break;
+	default:
+		WARN_ON_ONCE(1);
+		return VM_FAULT_FALLBACK;
+	}
+
+	/* Always do write dirty-tracking on PTE level. */
+	if (READ_ONCE(vbo->dirty) && (vmf->flags & FAULT_FLAG_WRITE))
+		return VM_FAULT_FALLBACK;
+
+	ret = ttm_bo_vm_reserve(bo, vmf);
+	if (ret)
+		return ret;
+
+	if (vbo->dirty) {
+		pgoff_t allowed_prefault;
+		unsigned long page_offset;
+
+		page_offset = vmf->pgoff -
+			drm_vma_node_start(&bo->base.vma_node);
+		if (page_offset >= bo->num_pages ||
+		    vmw_resources_clean(vbo, page_offset,
+					page_offset + PAGE_SIZE,
+					&allowed_prefault)) {
+			ret = VM_FAULT_SIGBUS;
+			goto out_unlock;
+		}
+
+		/*
+		 * Write protect, so we get a new fault on write, and can
+		 * split.
+		 */
+		prot = vm_get_page_prot(vma->vm_flags & ~VM_SHARED);
+	} else {
+		prot = vm_get_page_prot(vma->vm_flags);
+	}
+	ret = ttm_bo_vm_fault_reserved(vmf, prot, 1, fault_page_size);
+	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
+		return ret;
+
+out_unlock:
+	dma_resv_unlock(bo->base.resv);
+
+	return ret;
+}
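
For reference (not part of the diff): the fault_page_size passed to
ttm_bo_vm_fault_reserved() above is the huge entry size expressed in
base pages. With 4 KiB pages on x86-64 that is
HPAGE_PMD_SIZE >> PAGE_SHIFT == 2 MiB / 4 KiB == 512 for PE_SIZE_PMD
and HPAGE_PUD_SIZE >> PAGE_SHIFT == 1 GiB / 4 KiB == 262144 for
PE_SIZE_PUD.
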
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
index ce288756531b..de838ba88a97 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
@@ -33,6 +33,7 @@ int vmw_mmap(struct file *filp, struct vm_area_struct *vma)
 		.pfn_mkwrite = vmw_bo_vm_mkwrite,
 		.page_mkwrite = vmw_bo_vm_mkwrite,
 		.fault = vmw_bo_vm_fault,
+		.huge_fault = vmw_bo_vm_huge_fault,
 		.open = ttm_bo_vm_open,
 		.close = ttm_bo_vm_close
 	};
--
2.21.0