[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Date: Tue, 3 Mar 2020 15:54:45 -0500
From: Arvind Sankar <nivedita@...m.mit.edu>
To: Dave Hansen <dave.hansen@...ux.intel.com>,
Andy Lutomirski <luto@...nel.org>,
Peter Zijlstra <peterz@...radead.org>
Cc: Thomas Gleixner <tglx@...utronix.de>,
Ingo Molnar <mingo@...hat.com>, Borislav Petkov <bp@...en8.de>,
"H. Peter Anvin" <hpa@...or.com>, x86@...nel.org,
Ard Biesheuvel <ardb@...nel.org>, linux-efi@...r.kernel.org,
linux-kernel@...r.kernel.org
Subject: [PATCH 4/4] x86/mm/pat: Make num_pages consistent in populate_{pte,pud,pgd}
The number of pages is currently all of int, unsigned int, long and
unsigned long in different places.
Change it to be consistently unsigned long.
Remove the unnecessary min(num_pages, cur_pages), since pre_end has
already been min'd with start + (num_pages << PAGE_SHIFT). This gets rid
of two conversions to int/unsigned int.
Signed-off-by: Arvind Sankar <nivedita@...m.mit.edu>
---
arch/x86/include/asm/pgtable_types.h | 2 +-
arch/x86/mm/pat/set_memory.c | 13 ++++++-------
2 files changed, 7 insertions(+), 8 deletions(-)
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index 0239998d8cdc..894569255a95 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -574,7 +574,7 @@ extern pmd_t *lookup_pmd_address(unsigned long address);
extern phys_addr_t slow_virt_to_phys(void *__address);
extern int __init kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn,
unsigned long address,
- unsigned numpages,
+ unsigned long numpages,
unsigned long page_flags);
extern int __init kernel_unmap_pages_in_pgd(pgd_t *pgd, unsigned long address,
unsigned long numpages);
diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c
index 2f98423ef69a..51b64937cc16 100644
--- a/arch/x86/mm/pat/set_memory.c
+++ b/arch/x86/mm/pat/set_memory.c
@@ -1230,7 +1230,7 @@ static int alloc_pmd_page(pud_t *pud)
static void populate_pte(struct cpa_data *cpa,
unsigned long start, unsigned long end,
- unsigned num_pages, pmd_t *pmd, pgprot_t pgprot)
+ unsigned long num_pages, pmd_t *pmd, pgprot_t pgprot)
{
pte_t *pte;
@@ -1249,9 +1249,9 @@ static void populate_pte(struct cpa_data *cpa,
static int populate_pmd(struct cpa_data *cpa,
unsigned long start, unsigned long end,
- unsigned num_pages, pud_t *pud, pgprot_t pgprot)
+ unsigned long num_pages, pud_t *pud, pgprot_t pgprot)
{
- long cur_pages = 0;
+ unsigned long cur_pages = 0;
pmd_t *pmd;
pgprot_t pmd_pgprot;
@@ -1264,7 +1264,6 @@ static int populate_pmd(struct cpa_data *cpa,
pre_end = min_t(unsigned long, pre_end, next_page);
cur_pages = (pre_end - start) >> PAGE_SHIFT;
- cur_pages = min_t(unsigned int, num_pages, cur_pages);
/*
* Need a PTE page?
@@ -1326,7 +1325,7 @@ static int populate_pud(struct cpa_data *cpa, unsigned long start, p4d_t *p4d,
{
pud_t *pud;
unsigned long end;
- long cur_pages = 0;
+ unsigned long cur_pages = 0;
pgprot_t pud_pgprot;
int ret;
@@ -1342,7 +1341,6 @@ static int populate_pud(struct cpa_data *cpa, unsigned long start, p4d_t *p4d,
pre_end = min_t(unsigned long, end, next_page);
cur_pages = (pre_end - start) >> PAGE_SHIFT;
- cur_pages = min_t(int, (int)cpa->numpages, cur_pages);
pud = pud_offset(p4d, start);
@@ -2231,7 +2229,8 @@ bool kernel_page_present(struct page *page)
#endif /* CONFIG_HIBERNATION */
int __init kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address,
- unsigned numpages, unsigned long page_flags)
+ unsigned long numpages,
+ unsigned long page_flags)
{
int retval = -EINVAL;
--
2.24.1
Powered by blists - more mailing lists