Message-Id: <20200212041845.25879-7-willy@infradead.org>
Date: Tue, 11 Feb 2020 20:18:26 -0800
From: Matthew Wilcox <willy@...radead.org>
To: linux-fsdevel@...r.kernel.org, linux-mm@...ck.org
Cc: "Matthew Wilcox (Oracle)" <willy@...radead.org>,
linux-kernel@...r.kernel.org
Subject: [PATCH v2 06/25] mm: Allow hpages to be arbitrary order
From: "Matthew Wilcox (Oracle)" <willy@...radead.org>
Remove the assumption in hpage_nr_pages() that compound pages are
necessarily PMD-sized. The return type needs to be signed because
callers use the negated value, e.g. when calling update_lru_size().

Signed-off-by: Matthew Wilcox (Oracle) <willy@...radead.org>
---
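[Note, not part of the patch: an illustrative sketch of the kind of caller
the commit message refers to. The helper name lru_del_page_account() is
hypothetical; update_lru_size(), page_lru() and page_zonenum() are existing
kernel helpers, assuming the usual <linux/mm.h> and <linux/mm_inline.h>
headers are available.]

/*
 * Illustrative sketch (hypothetical helper): update_lru_size() takes a
 * signed delta, and removing a page from the LRU passes the negated
 * hpage_nr_pages() value, hence the (long) cast in the new definition.
 */
static inline void lru_del_page_account(struct lruvec *lruvec,
					struct page *page)
{
	update_lru_size(lruvec, page_lru(page), page_zonenum(page),
			-hpage_nr_pages(page));
}
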
 include/linux/huge_mm.h | 10 +++-------
 1 file changed, 3 insertions(+), 7 deletions(-)

diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 5aca3d1bdb32..16367e2f771e 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -230,12 +230,8 @@ static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
 	else
 		return NULL;
 }
-static inline int hpage_nr_pages(struct page *page)
-{
-	if (unlikely(PageTransHuge(page)))
-		return HPAGE_PMD_NR;
-	return 1;
-}
+
+#define hpage_nr_pages(page)	(long)compound_nr(page)
 
 struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
 		pmd_t *pmd, int flags, struct dev_pagemap **pgmap);
@@ -289,7 +285,7 @@ static inline struct list_head *page_deferred_list(struct page *page)
 #define HPAGE_PUD_MASK ({ BUILD_BUG(); 0; })
 #define HPAGE_PUD_SIZE ({ BUILD_BUG(); 0; })
 
-#define hpage_nr_pages(x) 1
+#define hpage_nr_pages(x) 1L
 
 static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
 {
--
2.25.0