Message-ID: <lsq.1538257387.848379423@decadent.org.uk>
Date: Sat, 29 Sep 2018 22:43:07 +0100
From: Ben Hutchings <ben@...adent.org.uk>
To: linux-kernel@...r.kernel.org, stable@...r.kernel.org
CC: akpm@...ux-foundation.org, "Chen Liqin" <liqin.linux@...il.com>,
"Kirill A. Shutemov" <kirill.shutemov@...ux.intel.com>,
"Lennox Wu" <lennox.wu@...il.com>,
"Linus Torvalds" <torvalds@...ux-foundation.org>
Subject: [PATCH 3.16 082/131] score: drop _PAGE_FILE and pte_file()-related helpers
3.16.59-rc1 review patch. If anyone has any objections, please let me know.
------------------
From: "Kirill A. Shutemov" <kirill.shutemov@...ux.intel.com>
commit 917e401ea75478d4f4575bc8b0ef3d14ecf9ef69 upstream.
We've replaced the remap_file_pages(2) implementation with emulation.
Nobody creates non-linear mappings anymore.

This patch also increases the number of bits available for the swap offset.
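
For reference, a minimal user-space sketch of the swap-entry encoding this
change produces, built from the __swp_type/__swp_offset/__swp_entry values in
the pgtable.h hunk below; the struct definition, test values and main() harness
here are illustrative only, not part of the patch:

/*
 * Illustrative user-space model (not kernel code) of the score swap-PTE
 * encoding after this patch: swap type in bits 0-4, swap offset starting
 * at bit 10 (previously bit 11, so the offset gains one bit).
 */
#include <assert.h>
#include <stdio.h>

typedef struct { unsigned long val; } swp_entry_t;

#define __swp_type(x)		((x).val & 0x1f)
#define __swp_offset(x)		((x).val >> 10)
#define __swp_entry(type, offset) \
	((swp_entry_t){ (type) | ((unsigned long)(offset) << 10) })

int main(void)
{
	swp_entry_t e = __swp_entry(3, 0x12345);

	/* Encoding and decoding round-trip cleanly. */
	assert(__swp_type(e) == 3);
	assert(__swp_offset(e) == 0x12345);
	printf("type=%lu offset=%#lx\n",
	       __swp_type(e), __swp_offset(e));
	return 0;
}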
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@...ux.intel.com>
Cc: Chen Liqin <liqin.linux@...il.com>
Cc: Lennox Wu <lennox.wu@...il.com>
Signed-off-by: Andrew Morton <akpm@...ux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@...ux-foundation.org>
Signed-off-by: Ben Hutchings <ben@...adent.org.uk>
---
arch/score/include/asm/pgtable-bits.h | 1 -
arch/score/include/asm/pgtable.h | 18 ++----------------
2 files changed, 2 insertions(+), 17 deletions(-)
--- a/arch/score/include/asm/pgtable-bits.h
+++ b/arch/score/include/asm/pgtable-bits.h
@@ -6,7 +6,6 @@
#define _PAGE_WRITE (1<<7) /* implemented in software */
#define _PAGE_PRESENT (1<<9) /* implemented in software */
#define _PAGE_MODIFIED (1<<10) /* implemented in software */
-#define _PAGE_FILE (1<<10)
#define _PAGE_GLOBAL (1<<0)
#define _PAGE_VALID (1<<1)
--- a/arch/score/include/asm/pgtable.h
+++ b/arch/score/include/asm/pgtable.h
@@ -90,15 +90,6 @@ static inline void pmd_clear(pmd_t *pmdp
((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
#define pte_unmap(pte) ((void)(pte))
-/*
- * Bits 9(_PAGE_PRESENT) and 10(_PAGE_FILE)are taken,
- * split up 30 bits of offset into this range:
- */
-#define PTE_FILE_MAX_BITS 30
-#define pte_to_pgoff(_pte) \
- (((_pte).pte & 0x1ff) | (((_pte).pte >> 11) << 9))
-#define pgoff_to_pte(off) \
- ((pte_t) {((off) & 0x1ff) | (((off) >> 9) << 11) | _PAGE_FILE})
#define __pte_to_swp_entry(pte) \
((swp_entry_t) { pte_val(pte)})
#define __swp_entry_to_pte(x) ((pte_t) {(x).val})
@@ -169,8 +160,8 @@ static inline pgprot_t pgprot_noncached(
}
#define __swp_type(x) ((x).val & 0x1f)
-#define __swp_offset(x) ((x).val >> 11)
-#define __swp_entry(type, offset) ((swp_entry_t){(type) | ((offset) << 11)})
+#define __swp_offset(x) ((x).val >> 10)
+#define __swp_entry(type, offset) ((swp_entry_t){(type) | ((offset) << 10)})
extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;
@@ -198,11 +189,6 @@ static inline int pte_young(pte_t pte)
return pte_val(pte) & _PAGE_ACCESSED;
}
-static inline int pte_file(pte_t pte)
-{
- return pte_val(pte) & _PAGE_FILE;
-}
-
#define pte_special(pte) (0)
static inline pte_t pte_wrprotect(pte_t pte)