Message-Id: <20190529072109.GB3656@rapoport-lnx>
Date: Wed, 29 May 2019 10:21:10 +0300
From: Mike Rapoport <rppt@...ux.ibm.com>
To: "Kirill A. Shutemov" <kirill.shutemov@...ux.intel.com>
Cc: Andrew Morton <akpm@...ux-foundation.org>, x86@...nel.org,
Thomas Gleixner <tglx@...utronix.de>,
Ingo Molnar <mingo@...hat.com>,
"H. Peter Anvin" <hpa@...or.com>, Borislav Petkov <bp@...en8.de>,
Peter Zijlstra <peterz@...radead.org>,
Andy Lutomirski <luto@...capital.net>,
David Howells <dhowells@...hat.com>,
Kees Cook <keescook@...omium.org>,
Dave Hansen <dave.hansen@...el.com>,
Kai Huang <kai.huang@...ux.intel.com>,
Jacob Pan <jacob.jun.pan@...ux.intel.com>,
Alison Schofield <alison.schofield@...el.com>,
linux-mm@...ck.org, kvm@...r.kernel.org, keyrings@...r.kernel.org,
linux-kernel@...r.kernel.org
Subject: Re: [PATCH, RFC 02/62] mm: Add helpers to setup zero page mappings
On Wed, May 08, 2019 at 05:43:22PM +0300, Kirill A. Shutemov wrote:
> When kernel setups an encrypted page mapping, encryption KeyID is
Nit: "when kernel sets up an encrypted..."
> derived from a VMA. KeyID is going to be part of vma->vm_page_prot and
> it will be propagated transparently to page table entry on mk_pte().
>
> But there is an exception: zero page is never encrypted and its mapping
> must use KeyID-0, regardless VMA's KeyID.
Nit: "regardless of the VMA's KeyID"
>
> Introduce helpers that create a page table entry for zero page.
>
> The generic implementation will be overridden by architecture-specific
> code that takes care about using correct KeyID.
Nit: "takes care of using the correct KeyID"
>
> Signed-off-by: Kirill A. Shutemov <kirill.shutemov@...ux.intel.com>
> ---
>  fs/dax.c                      | 3 +--
>  include/asm-generic/pgtable.h | 8 ++++++++
>  mm/huge_memory.c              | 6 ++----
>  mm/memory.c                   | 3 +--
>  mm/userfaultfd.c              | 3 +--
>  5 files changed, 13 insertions(+), 10 deletions(-)
>
> diff --git a/fs/dax.c b/fs/dax.c
> index e5e54da1715f..6d609bff53b9 100644
> --- a/fs/dax.c
> +++ b/fs/dax.c
> @@ -1441,8 +1441,7 @@ static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
>  		pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
>  		mm_inc_nr_ptes(vma->vm_mm);
>  	}
> -	pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot);
> -	pmd_entry = pmd_mkhuge(pmd_entry);
> +	pmd_entry = mk_zero_pmd(zero_page, vmf->vma->vm_page_prot);
>  	set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
>  	spin_unlock(ptl);
>  	trace_dax_pmd_load_hole(inode, vmf, zero_page, *entry);
> diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
> index fa782fba51ee..cde8b81f6f2b 100644
> --- a/include/asm-generic/pgtable.h
> +++ b/include/asm-generic/pgtable.h
> @@ -879,8 +879,16 @@ static inline unsigned long my_zero_pfn(unsigned long addr)
>  }
>  #endif
>
> +#ifndef mk_zero_pte
> +#define mk_zero_pte(addr, prot) pte_mkspecial(pfn_pte(my_zero_pfn(addr), prot))
> +#endif
> +
>  #ifdef CONFIG_MMU
>
> +#ifndef mk_zero_pmd
> +#define mk_zero_pmd(zero_page, prot) pmd_mkhuge(mk_pmd(zero_page, prot))
> +#endif
> +
>  #ifndef CONFIG_TRANSPARENT_HUGEPAGE
>  static inline int pmd_trans_huge(pmd_t pmd)
>  {
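
I'd guess that on MKTME-enabled configurations the arch override will
force KeyID-0 by masking the KeyID bits out of the protection bits
before constructing the entry, roughly like the sketch below. This is
only my guess at the shape of it; the mktme_keyid_mask name is an
assumption on my side, not something taken from this series:

	/* Hypothetical x86 override, sketch only */

	/*
	 * The zero page is always encrypted with KeyID-0, so strip any
	 * KeyID bits that vma->vm_page_prot may carry before building
	 * the entry. mktme_keyid_mask is assumed to cover the KeyID
	 * bits of the entry.
	 */
	#define mk_zero_pte(addr, prot) \
		pte_mkspecial(pfn_pte(my_zero_pfn(addr), \
			__pgprot(pgprot_val(prot) & ~mktme_keyid_mask)))

	#define mk_zero_pmd(zero_page, prot) \
		pmd_mkhuge(mk_pmd(zero_page, \
			__pgprot(pgprot_val(prot) & ~mktme_keyid_mask)))

If that's the plan, it may be worth spelling out in the commit message
which helpers the arch code is expected to override.
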
> diff --git a/mm/huge_memory.c b/mm/huge_memory.c
> index 165ea46bf149..26c3503824ba 100644
> --- a/mm/huge_memory.c
> +++ b/mm/huge_memory.c
> @@ -675,8 +675,7 @@ static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
>  	pmd_t entry;
>  	if (!pmd_none(*pmd))
>  		return false;
> -	entry = mk_pmd(zero_page, vma->vm_page_prot);
> -	entry = pmd_mkhuge(entry);
> +	entry = mk_zero_pmd(zero_page, vma->vm_page_prot);
>  	if (pgtable)
>  		pgtable_trans_huge_deposit(mm, pmd, pgtable);
>  	set_pmd_at(mm, haddr, pmd, entry);
> @@ -2101,8 +2100,7 @@ static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
>
>  	for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
>  		pte_t *pte, entry;
> -		entry = pfn_pte(my_zero_pfn(haddr), vma->vm_page_prot);
> -		entry = pte_mkspecial(entry);
> +		entry = mk_zero_pte(haddr, vma->vm_page_prot);
>  		pte = pte_offset_map(&_pmd, haddr);
>  		VM_BUG_ON(!pte_none(*pte));
>  		set_pte_at(mm, haddr, pte, entry);
> diff --git a/mm/memory.c b/mm/memory.c
> index ab650c21bccd..c5e0c87a12b7 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -2927,8 +2927,7 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
>  	/* Use the zero-page for reads */
>  	if (!(vmf->flags & FAULT_FLAG_WRITE) &&
>  			!mm_forbids_zeropage(vma->vm_mm)) {
> -		entry = pte_mkspecial(pfn_pte(my_zero_pfn(vmf->address),
> -						vma->vm_page_prot));
> +		entry = mk_zero_pte(vmf->address, vma->vm_page_prot);
>  		vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
>  				vmf->address, &vmf->ptl);
>  		if (!pte_none(*vmf->pte))
> diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
> index d59b5a73dfb3..ac1ce3866036 100644
> --- a/mm/userfaultfd.c
> +++ b/mm/userfaultfd.c
> @@ -122,8 +122,7 @@ static int mfill_zeropage_pte(struct mm_struct *dst_mm,
>  	pgoff_t offset, max_off;
>  	struct inode *inode;
>
> -	_dst_pte = pte_mkspecial(pfn_pte(my_zero_pfn(dst_addr),
> -					 dst_vma->vm_page_prot));
> +	_dst_pte = mk_zero_pte(dst_addr, dst_vma->vm_page_prot);
>  	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
>  	if (dst_vma->vm_file) {
>  		/* the shmem MAP_PRIVATE case requires checking the i_size */
> --
> 2.20.1
>
--
Sincerely yours,
Mike.