Message-ID: <e1a5ce03-1b8d-8118-de95-b53901e94b50@amd.com>
Date: Wed, 27 Nov 2019 09:42:45 +0100
From: Christian König <christian.koenig@....com>
To: Thomas Hellström (VMware)
<thomas_os@...pmail.org>, dri-devel@...ts.freedesktop.org,
linux-mm@...ck.org, linux-kernel@...r.kernel.org,
linux-graphics-maintainer@...are.com
Cc: Thomas Hellstrom <thellstrom@...are.com>,
Andrew Morton <akpm@...ux-foundation.org>,
Michal Hocko <mhocko@...e.com>,
"Matthew Wilcox (Oracle)" <willy@...radead.org>,
"Kirill A. Shutemov" <kirill.shutemov@...ux.intel.com>,
Ralph Campbell <rcampbell@...dia.com>,
Jérôme Glisse <jglisse@...hat.com>
Subject: Re: [PATCH 1/2] mm: Add and export vmf_insert_mixed_prot()
On 26.11.19 at 21:27, Thomas Hellström (VMware) wrote:
> From: Thomas Hellstrom <thellstrom@...are.com>
>
> The TTM module currently uses a hack to set a page protection different
> from struct vm_area_struct::vm_page_prot. To do this properly, add and
> export vmf_insert_mixed_prot().
>
> Cc: Andrew Morton <akpm@...ux-foundation.org>
> Cc: Michal Hocko <mhocko@...e.com>
> Cc: "Matthew Wilcox (Oracle)" <willy@...radead.org>
> Cc: "Kirill A. Shutemov" <kirill.shutemov@...ux.intel.com>
> Cc: Ralph Campbell <rcampbell@...dia.com>
> Cc: "Jérôme Glisse" <jglisse@...hat.com>
> Cc: "Christian König" <christian.koenig@....com>
> Signed-off-by: Thomas Hellstrom <thellstrom@...are.com>
Acked-by: Christian König <christian.koenig@....com>
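
For anyone wiring this up, here is a minimal sketch of what a caller of the
new export could look like. The fault handler, object type and protection
helper below (my_drv_fault(), struct my_obj, my_obj_pgprot()) are made-up
illustrations; only vmf_insert_mixed_prot() itself comes from this patch:

static vm_fault_t my_drv_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct my_obj *obj = vma->vm_private_data;	/* hypothetical */
	/* Caller-chosen protection, e.g. write-combined for device memory. */
	pgprot_t prot = my_obj_pgprot(obj, vma->vm_page_prot);
	pfn_t pfn = phys_to_pfn_t(obj->paddr, PFN_DEV);

	/*
	 * Unlike vmf_insert_mixed(), which always uses vma->vm_page_prot,
	 * the _prot variant takes the page protection from the caller.
	 */
	return vmf_insert_mixed_prot(vma, vmf->address, pfn, prot);
}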
> ---
> include/linux/mm.h | 2 ++
> mm/memory.c | 15 +++++++++++----
> 2 files changed, 13 insertions(+), 4 deletions(-)
>
> diff --git a/include/linux/mm.h b/include/linux/mm.h
> index cc292273e6ba..29575d3c1e47 100644
> --- a/include/linux/mm.h
> +++ b/include/linux/mm.h
> @@ -2548,6 +2548,8 @@ vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
> unsigned long pfn, pgprot_t pgprot);
> vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
> pfn_t pfn);
> +vm_fault_t vmf_insert_mixed_prot(struct vm_area_struct *vma, unsigned long addr,
> + pfn_t pfn, pgprot_t pgprot);
> vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
> unsigned long addr, pfn_t pfn);
> int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);
> diff --git a/mm/memory.c b/mm/memory.c
> index b1ca51a079f2..28f162e28144 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -1719,9 +1719,9 @@ static bool vm_mixed_ok(struct vm_area_struct *vma, pfn_t pfn)
> }
>
> static vm_fault_t __vm_insert_mixed(struct vm_area_struct *vma,
> - unsigned long addr, pfn_t pfn, bool mkwrite)
> + unsigned long addr, pfn_t pfn, pgprot_t pgprot,
> + bool mkwrite)
> {
> - pgprot_t pgprot = vma->vm_page_prot;
> int err;
>
> BUG_ON(!vm_mixed_ok(vma, pfn));
> @@ -1764,10 +1764,17 @@ static vm_fault_t __vm_insert_mixed(struct vm_area_struct *vma,
> return VM_FAULT_NOPAGE;
> }
>
> +vm_fault_t vmf_insert_mixed_prot(struct vm_area_struct *vma, unsigned long addr,
> + pfn_t pfn, pgprot_t pgprot)
> +{
> + return __vm_insert_mixed(vma, addr, pfn, pgprot, false);
> +}
> +EXPORT_SYMBOL(vmf_insert_mixed_prot);
> +
> vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
> pfn_t pfn)
> {
> - return __vm_insert_mixed(vma, addr, pfn, false);
> + return __vm_insert_mixed(vma, addr, pfn, vma->vm_page_prot, false);
> }
> EXPORT_SYMBOL(vmf_insert_mixed);
>
> @@ -1779,7 +1786,7 @@ EXPORT_SYMBOL(vmf_insert_mixed);
> vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
> unsigned long addr, pfn_t pfn)
> {
> - return __vm_insert_mixed(vma, addr, pfn, true);
> + return __vm_insert_mixed(vma, addr, pfn, vma->vm_page_prot, true);
> }
> EXPORT_SYMBOL(vmf_insert_mixed_mkwrite);
>
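For context on the hack the commit message refers to: before this series,
TTM worked around the fixed vma->vm_page_prot by faulting through a local
copy of the VMA with a rewritten protection. The lines below are a
paraphrased sketch of that workaround, not the exact TTM code; ttm_io_prot()
is TTM's existing helper for computing the caching-dependent protection:

	/* The old workaround: fault through a stack copy of the VMA. */
	struct vm_area_struct cvma = *vma;

	cvma.vm_page_prot = ttm_io_prot(bo->mem.placement, cvma.vm_page_prot);
	ret = vmf_insert_mixed(&cvma, address, pfn);

	/* With this patch the copy becomes unnecessary: */
	ret = vmf_insert_mixed_prot(vma, address, pfn,
				    ttm_io_prot(bo->mem.placement,
						vma->vm_page_prot));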