Message-ID: <20190723235747.GP15331@mellanox.com>
Date:   Tue, 23 Jul 2019 23:57:52 +0000
From:   Jason Gunthorpe <jgg@...lanox.com>
To:     Ralph Campbell <rcampbell@...dia.com>
CC:     "linux-mm@...ck.org" <linux-mm@...ck.org>,
        "linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>,
        Jérôme Glisse <jglisse@...hat.com>,
        Christoph Hellwig <hch@....de>
Subject: Re: [PATCH 1/2] mm/hmm: a few more C style and comment clean ups

On Tue, Jul 23, 2019 at 04:30:15PM -0700, Ralph Campbell wrote:
> -	if (pmd_huge(pmd) && (range->vma->vm_flags & VM_HUGETLB))
> +	if (pmd_huge(pmd) && is_vm_hugetlb_page(vma))
>  		return hmm_pfns_bad(start, end, walk);

This one is not a minor cleanup.. I think it should be done in its
own commit, and more completely, maybe based on the below..

If vma is always the same as the first vma, then your hunk above
here is much better than introducing a hugetlb flag as I did below..
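
(For reference, quoting is_vm_hugetlb_page() from memory, from
include/linux/hugetlb_inline.h -- the !CONFIG_HUGETLB_PAGE stub just
returns false, which is one reason the helper reads better than the
open-coded flag test:)

#ifdef CONFIG_HUGETLB_PAGE
static inline bool is_vm_hugetlb_page(struct vm_area_struct *vma)
{
	/* VM_HUGETLB is set on hugetlbfs mappings */
	return !!(vma->vm_flags & VM_HUGETLB);
}
#else
static inline bool is_vm_hugetlb_page(struct vm_area_struct *vma)
{
	return false;
}
#endif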

Although I don't understand why we have this test at all, since the
code does seem to support huge pages and the commit log suggests
hugetlbfs was deliberately supported. So a comment (or deletion) sure
would be nice.
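
(For context, hmm_pfns_bad() just fills the whole span with the error
value, so hitting this test means the caller sees HMM_PFN_ERROR for
every page in the range. Roughly, from memory of mm/hmm.c around this
version:)

static int hmm_pfns_bad(unsigned long addr, unsigned long end,
			struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	uint64_t *pfns = range->pfns;
	unsigned long i;

	/* Mark every pfn in [addr, end) as an error. */
	i = (addr - range->start) >> PAGE_SHIFT;
	for (; addr < end; addr += PAGE_SIZE, i++)
		pfns[i] = range->values[HMM_PFN_ERROR];

	return 0;
}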

So maybe sequence this into your series?

Jason

From 6ea7cd2565b5b660d22a659b71b62614e66bc345 Mon Sep 17 00:00:00 2001
From: Jason Gunthorpe <jgg@...lanox.com>
Date: Tue, 23 Jul 2019 12:28:32 -0300
Subject: [PATCH] mm/hmm: remove hmm_range vma

This value is only read inside hmm_vma_walk_pmd(), and all the callers,
via walk_page_range(), always set it. The proper place for per-walk
data is struct hmm_vma_walk, and since the only use is a vm_flags test,
just precompute and store that.

Signed-off-by: Jason Gunthorpe <jgg@...lanox.com>
---
 drivers/gpu/drm/nouveau/nouveau_svm.c |  7 +++----
 include/linux/hmm.h                   |  1 -
 mm/hmm.c                              | 11 ++++++-----
 3 files changed, 9 insertions(+), 10 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c
index a9c5c58d425b3d..4f4bec40b887a6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_svm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_svm.c
@@ -495,12 +495,12 @@ nouveau_range_fault(struct hmm_mirror *mirror, struct hmm_range *range)
 				 range->start, range->end,
 				 PAGE_SHIFT);
 	if (ret) {
-		up_read(&range->vma->vm_mm->mmap_sem);
+		up_read(&range->hmm->mm->mmap_sem);
 		return (int)ret;
 	}
 
 	if (!hmm_range_wait_until_valid(range, HMM_RANGE_DEFAULT_TIMEOUT)) {
-		up_read(&range->vma->vm_mm->mmap_sem);
+		up_read(&range->hmm->mm->mmap_sem);
 		return -EBUSY;
 	}
 
@@ -508,7 +508,7 @@ nouveau_range_fault(struct hmm_mirror *mirror, struct hmm_range *range)
 	if (ret <= 0) {
 		if (ret == 0)
 			ret = -EBUSY;
-		up_read(&range->vma->vm_mm->mmap_sem);
+		up_read(&range->hmm->mm->mmap_sem);
 		hmm_range_unregister(range);
 		return ret;
 	}
@@ -681,7 +681,6 @@ nouveau_svm_fault(struct nvif_notify *notify)
 			 args.i.p.addr + args.i.p.size, fn - fi);
 
 		/* Have HMM fault pages within the fault window to the GPU. */
-		range.vma = vma;
 		range.start = args.i.p.addr;
 		range.end = args.i.p.addr + args.i.p.size;
 		range.pfns = args.phys;
diff --git a/include/linux/hmm.h b/include/linux/hmm.h
index 9f32586684c9c3..d4b89f655817cd 100644
--- a/include/linux/hmm.h
+++ b/include/linux/hmm.h
@@ -164,7 +164,6 @@ enum hmm_pfn_value_e {
  */
 struct hmm_range {
 	struct hmm		*hmm;
-	struct vm_area_struct	*vma;
 	struct list_head	list;
 	unsigned long		start;
 	unsigned long		end;
diff --git a/mm/hmm.c b/mm/hmm.c
index 16b6731a34db79..3d8cdfb67a6ab8 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -285,8 +285,9 @@ struct hmm_vma_walk {
 	struct hmm_range	*range;
 	struct dev_pagemap	*pgmap;
 	unsigned long		last;
-	bool			fault;
-	bool			block;
+	bool			fault : 1;
+	bool			block : 1;
+	bool			hugetlb : 1;
 };
 
 static int hmm_vma_do_fault(struct mm_walk *walk, unsigned long addr,
@@ -635,7 +636,7 @@ static int hmm_vma_walk_pmd(pmd_t *pmdp,
 	if (pmd_none(pmd))
 		return hmm_vma_walk_hole(start, end, walk);
 
-	if (pmd_huge(pmd) && (range->vma->vm_flags & VM_HUGETLB))
+	if (pmd_huge(pmd) && hmm_vma_walk->hugetlb)
 		return hmm_pfns_bad(start, end, walk);
 
 	if (thp_migration_supported() && is_pmd_migration_entry(pmd)) {
@@ -994,7 +995,7 @@ long hmm_range_snapshot(struct hmm_range *range)
 			return -EPERM;
 		}
 
-		range->vma = vma;
+		hmm_vma_walk.hugetlb = vma->vm_flags & VM_HUGETLB;
 		hmm_vma_walk.pgmap = NULL;
 		hmm_vma_walk.last = start;
 		hmm_vma_walk.fault = false;
@@ -1090,7 +1091,7 @@ long hmm_range_fault(struct hmm_range *range, bool block)
 			return -EPERM;
 		}
 
-		range->vma = vma;
+		hmm_vma_walk.hugetlb = vma->vm_flags & VM_HUGETLB;
 		hmm_vma_walk.pgmap = NULL;
 		hmm_vma_walk.last = start;
 		hmm_vma_walk.fault = true;
-- 
2.22.0
