Message-Id: <20171212114544.56680-4-kirill.shutemov@linux.intel.com>
Date: Tue, 12 Dec 2017 14:45:44 +0300
From: "Kirill A. Shutemov" <kirill.shutemov@...ux.intel.com>
To: Tom Lendacky <thomas.lendacky@....com>, Thomas Gleixner <tglx@...utronix.de>,
	Ingo Molnar <mingo@...nel.org>, "H. Peter Anvin" <hpa@...or.com>
Cc: x86@...nel.org, Borislav Petkov <bp@...e.de>, Brijesh Singh <brijesh.singh@....com>,
	linux-mm@...ck.org, linux-kernel@...r.kernel.org,
	"Kirill A. Shutemov" <kirill.shutemov@...ux.intel.com>
Subject: [PATCH 3/3] x86/mm/encrypt: Rewrite sme_pgtable_calc()

sme_pgtable_calc() is unnecessarily complex. It can be rewritten in a
more streamlined way.

As a side effect, this gets the code ready for boot-time switching
between paging modes.

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@...ux.intel.com>
---
 arch/x86/mm/mem_encrypt.c | 42 ++++++++++++------------------------------
 1 file changed, 12 insertions(+), 30 deletions(-)

diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index f1f0a3fa7489..fe7fc1c6eaf7 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -561,8 +561,7 @@ static void __init sme_map_range_decrypted_wp(pgd_t *pgd,
 
 static unsigned long __init sme_pgtable_calc(unsigned long len)
 {
-	unsigned long p4d_size, pud_size, pmd_size, pte_size;
-	unsigned long total;
+	unsigned long entries = 0, tables = 0;
 
 	/*
 	 * Perform a relatively simplistic calculation of the pagetable
@@ -572,42 +571,25 @@ static unsigned long __init sme_pgtable_calc(unsigned long len)
 	 * mappings. Incrementing the count for each covers the case where
 	 * the addresses cross entries.
 	 */
 
-	if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
-		p4d_size = (ALIGN(len, PGDIR_SIZE) / PGDIR_SIZE) + 1;
-		p4d_size *= sizeof(p4d_t) * PTRS_PER_P4D;
-		pud_size = (ALIGN(len, P4D_SIZE) / P4D_SIZE) + 1;
-		pud_size *= sizeof(pud_t) * PTRS_PER_PUD;
-	} else {
-		p4d_size = 0;
-		pud_size = (ALIGN(len, PGDIR_SIZE) / PGDIR_SIZE) + 1;
-		pud_size *= sizeof(pud_t) * PTRS_PER_PUD;
-	}
-	pmd_size = (ALIGN(len, PUD_SIZE) / PUD_SIZE) + 1;
-	pmd_size *= sizeof(pmd_t) * PTRS_PER_PMD;
-	pte_size = 2 * sizeof(pte_t) * PTRS_PER_PTE;
-	total = p4d_size + pud_size + pmd_size + pte_size;
+	/* PGDIR_SIZE is equal to P4D_SIZE on a 4-level machine. */
+	if (PTRS_PER_P4D > 1)
+		entries += (DIV_ROUND_UP(len, PGDIR_SIZE) + 1) * sizeof(p4d_t) * PTRS_PER_P4D;
+	entries += (DIV_ROUND_UP(len, P4D_SIZE) + 1) * sizeof(pud_t) * PTRS_PER_PUD;
+	entries += (DIV_ROUND_UP(len, PUD_SIZE) + 1) * sizeof(pmd_t) * PTRS_PER_PMD;
+	entries += 2 * sizeof(pte_t) * PTRS_PER_PTE;
 
 	/*
 	 * Now calculate the added pagetable structures needed to populate
 	 * the new pagetables.
 	 */
 
-	if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
-		p4d_size = ALIGN(total, PGDIR_SIZE) / PGDIR_SIZE;
-		p4d_size *= sizeof(p4d_t) * PTRS_PER_P4D;
-		pud_size = ALIGN(total, P4D_SIZE) / P4D_SIZE;
-		pud_size *= sizeof(pud_t) * PTRS_PER_PUD;
-	} else {
-		p4d_size = 0;
-		pud_size = ALIGN(total, PGDIR_SIZE) / PGDIR_SIZE;
-		pud_size *= sizeof(pud_t) * PTRS_PER_PUD;
-	}
-	pmd_size = ALIGN(total, PUD_SIZE) / PUD_SIZE;
-	pmd_size *= sizeof(pmd_t) * PTRS_PER_PMD;
-	total += p4d_size + pud_size + pmd_size;
+	if (PTRS_PER_P4D > 1)
+		tables += DIV_ROUND_UP(entries, PGDIR_SIZE) * sizeof(p4d_t) * PTRS_PER_P4D;
+	tables += DIV_ROUND_UP(entries, P4D_SIZE) * sizeof(pud_t) * PTRS_PER_PUD;
+	tables += DIV_ROUND_UP(entries, PUD_SIZE) * sizeof(pmd_t) * PTRS_PER_PMD;
 
-	return total;
+	return entries + tables;
 }
 
 void __init sme_encrypt_kernel(struct boot_params *bp)
-- 
2.15.0
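[Editorial note, not part of the patch: to make the two-pass arithmetic above concrete, here is a minimal userspace sketch of the same calculation. DIV_ROUND_UP is redefined locally, and ENTRY_SIZE, PTRS_PER_TABLE, PTRS_PER_P4D, PMD_SIZE, PUD_SIZE, P4D_SIZE and PGDIR_SIZE are hardcoded assumptions for classic 4-level x86-64 paging, standing in for the kernel's real definitions.]

/*
 * Userspace sketch of the reworked sme_pgtable_calc() arithmetic.
 * Not kernel code: the paging constants below are assumed values for
 * 4-level x86-64 paging (512 eight-byte entries per table page,
 * PTRS_PER_P4D == 1); the kernel takes them from its pagetable headers.
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

#define ENTRY_SIZE      8UL             /* sizeof(p4d_t/pud_t/pmd_t/pte_t) */
#define PTRS_PER_TABLE  512UL           /* entries per pagetable page */
#define PTRS_PER_P4D    1UL             /* 1 on 4-level, 512 on 5-level */
#define PMD_SIZE        (2UL << 20)     /* 2M mapped by one PMD entry */
#define PUD_SIZE        (1UL << 30)     /* 1G mapped by one PUD entry */
#define P4D_SIZE        (512UL << 30)   /* 512G; == PGDIR_SIZE with 4 levels */
#define PGDIR_SIZE      P4D_SIZE

static unsigned long pgtable_calc(unsigned long len)
{
        unsigned long entries = 0, tables = 0;

        /* Pass 1: pagetable pages needed to map 'len' bytes with 2M PMDs. */
        if (PTRS_PER_P4D > 1)   /* P4D pages: one per PGD entry spanned (+1) */
                entries += (DIV_ROUND_UP(len, PGDIR_SIZE) + 1) * ENTRY_SIZE * PTRS_PER_P4D;
        /* PUD pages: one per P4D entry spanned (+1 for a boundary crossing) */
        entries += (DIV_ROUND_UP(len, P4D_SIZE) + 1) * ENTRY_SIZE * PTRS_PER_TABLE;
        /* PMD pages: one per PUD entry spanned (+1) */
        entries += (DIV_ROUND_UP(len, PUD_SIZE) + 1) * ENTRY_SIZE * PTRS_PER_TABLE;
        /* Two PTE pages for the potentially unaligned head and tail. */
        entries += 2 * ENTRY_SIZE * PTRS_PER_TABLE;

        /* Pass 2: pagetable pages needed to map the new pages themselves. */
        if (PTRS_PER_P4D > 1)
                tables += DIV_ROUND_UP(entries, PGDIR_SIZE) * ENTRY_SIZE * PTRS_PER_P4D;
        tables += DIV_ROUND_UP(entries, P4D_SIZE) * ENTRY_SIZE * PTRS_PER_TABLE;
        tables += DIV_ROUND_UP(entries, PUD_SIZE) * ENTRY_SIZE * PTRS_PER_TABLE;

        return entries + tables;
}

int main(void)
{
        unsigned long len = 16UL << 20;         /* e.g. a 16M range to map */

        /* With the constants above this prints 32768 (8 pagetable pages). */
        printf("len = %lu MiB -> reserve %lu bytes for pagetables\n",
               len >> 20, pgtable_calc(len));
        return 0;
}

[Switching PTRS_PER_P4D to 512UL and PGDIR_SIZE to 256T would model the 5-level case; the kernel variant decides that at boot via the PTRS_PER_P4D > 1 check instead of the old CONFIG_X86_5LEVEL compile-time switch.]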