Date:	Tue, 5 Apr 2016 13:37:19 -0700 (PDT)
From:	Hugh Dickins <hughd@...gle.com>
To:	Andrew Morton <akpm@...ux-foundation.org>
cc:	"Kirill A. Shutemov" <kirill.shutemov@...ux.intel.com>,
	Andrea Arcangeli <aarcange@...hat.com>,
	Andres Lagar-Cavilla <andreslc@...gle.com>,
	Yang Shi <yang.shi@...aro.org>, Ning Qu <quning@...il.com>,
	Hugh Dickins <hughd@...gle.com>, linux-kernel@...r.kernel.org,
	linux-mm@...ck.org
Subject: [PATCH 00/10] mm: easy preliminaries to THPagecache

I've rebased my huge tmpfs series against v4.6-rc2, and split it into
two sets.  This is a set of miscellaneous preliminaries that I think
we can agree to put into mmotm right away, to be included in v4.7: or
if not, then I'll later rework the subsequent huge tmpfs series to
avoid or include them; but for now that series expects these to go in
ahead of it.

These don't assume or commit us to any particular implementation of
huge tmpfs, though most of them are tidyups that came from that work.
01-04 are similar to what I posted in February 2015.  I think 05 is
the only interesting patch here (sketched below); if 06 is rejected,
we can just keep it for our own testing.  07-10 clear away some small
obstructions.

But this is a weird 00/10, because it includes a patch at the bottom
itself: v4.6-rc2 missed out Kirill's page_cache_* removal, which the
following patches assume; so 00/10 should be applied if you're basing
on top of v4.6-rc2, but not if you're working on a later tree.

00 mm: get rid of a few rc2 page_cache_*
01 mm: update_lru_size warn and reset bad lru_size
02 mm: update_lru_size do the __mod_zone_page_state
03 mm: use __SetPageSwapBacked and dont ClearPageSwapBacked
04 tmpfs: preliminary minor tidyups
05 tmpfs: mem_cgroup charge fault to vm_mm not current mm
06 mm: /proc/sys/vm/stat_refresh to force vmstat update
07 huge mm: move_huge_pmd does not need new_vma
08 huge pagecache: extend mremap pmd rmap lockout to files
09 huge pagecache: mmap_sem is unlocked when truncation splits pmd
10 arch: fix has_transparent_hugepage()
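
For 06, a minimal sketch of how the knob would be exercised from
userspace, assuming it lands as /proc/sys/vm/stat_refresh with the
semantics described above (a privileged write forces the per-cpu
vmstat deltas to be folded into the global counters; the value written
is assumed to be ignored):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Force per-cpu vmstat deltas into the global counters, so an
	 * immediately following read of /proc/vmstat sees up-to-date
	 * numbers instead of waiting for the periodic vmstat update. */
	int fd = open("/proc/sys/vm/stat_refresh", O_WRONLY);

	if (fd < 0) {
		perror("open /proc/sys/vm/stat_refresh");
		return 1;
	}
	if (write(fd, "\n", 1) != 1) {
		perror("write");
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}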

 Documentation/sysctl/vm.txt                  |   14 +
 arch/arc/include/asm/hugepage.h              |    2 
 arch/arm/include/asm/pgtable-3level.h        |    5 
 arch/arm64/include/asm/pgtable.h             |    5 
 arch/mips/include/asm/pgtable.h              |    1 
 arch/mips/mm/tlb-r4k.c                       |   21 +-
 arch/powerpc/include/asm/book3s/64/pgtable.h |    1 
 arch/powerpc/include/asm/pgtable.h           |    1 
 arch/s390/include/asm/pgtable.h              |    1 
 arch/sparc/include/asm/pgtable_64.h          |    2 
 arch/tile/include/asm/pgtable.h              |    1 
 arch/x86/include/asm/pgtable.h               |    1 
 include/asm-generic/pgtable.h                |    8 
 include/linux/huge_mm.h                      |    4 
 include/linux/memcontrol.h                   |    6 
 include/linux/mempolicy.h                    |    6 
 include/linux/mm_inline.h                    |   24 ++
 include/linux/vmstat.h                       |    4 
 kernel/sysctl.c                              |    7 
 mm/filemap.c                                 |    4 
 mm/huge_memory.c                             |    7 
 mm/memcontrol.c                              |   26 ++
 mm/memory.c                                  |   17 -
 mm/migrate.c                                 |    6 
 mm/mremap.c                                  |   47 ++---
 mm/rmap.c                                    |    4 
 mm/shmem.c                                   |  148 +++++++----------
 mm/swap_state.c                              |    3 
 mm/vmscan.c                                  |   23 +-
 mm/vmstat.c                                  |   58 ++++++
 30 files changed, 271 insertions(+), 186 deletions(-)

[PATCH 00/10] mm: get rid of a few rc2 page_cache_*

Linus forgot to apply Kirill's PAGE_CACHE_* to page_cache_* riddance
in rc2, but did so the next day: this and the huge tmpfs series assume
that those changes have been made; so if applying these series to
vanilla v4.6-rc2 as intended, apply this patch first to resolve the
few clashes.
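
(For reference, the removed names had long been pure aliases in
include/linux/pagemap.h, so each hunk below is mechanical; these
definitions are recalled here for context, not part of the patch:

#define PAGE_CACHE_SHIFT	PAGE_SHIFT
#define PAGE_CACHE_SIZE		PAGE_SIZE
#define PAGE_CACHE_MASK		PAGE_MASK

#define page_cache_get(page)		get_page(page)
#define page_cache_release(page)	put_page(page)

so replacing each left-hand name by its right-hand equivalent changes
no behaviour.)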

Signed-off-by: Hugh Dickins <hughd@...gle.com>
---
 mm/filemap.c |    4 ++--
 mm/memory.c  |    6 +++---
 mm/rmap.c    |    2 +-
 mm/shmem.c   |   18 +++++++++---------
 4 files changed, 15 insertions(+), 15 deletions(-)

--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2178,8 +2178,8 @@ repeat:
 		if (page->mapping != mapping || !PageUptodate(page))
 			goto unlock;
 
-		size = round_up(i_size_read(mapping->host), PAGE_CACHE_SIZE);
-		if (page->index >= size >> PAGE_CACHE_SHIFT)
+		size = round_up(i_size_read(mapping->host), PAGE_SIZE);
+		if (page->index >= size >> PAGE_SHIFT)
 			goto unlock;
 
 		pte = vmf->pte + page->index - vmf->pgoff;
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2807,7 +2807,7 @@ static int __do_fault(struct vm_area_str
 	if (unlikely(PageHWPoison(vmf.page))) {
 		if (ret & VM_FAULT_LOCKED)
 			unlock_page(vmf.page);
-		page_cache_release(vmf.page);
+		put_page(vmf.page);
 		return VM_FAULT_HWPOISON;
 	}
 
@@ -2996,7 +2996,7 @@ static int do_read_fault(struct mm_struc
 	if (unlikely(!pte_same(*pte, orig_pte))) {
 		pte_unmap_unlock(pte, ptl);
 		unlock_page(fault_page);
-		page_cache_release(fault_page);
+		put_page(fault_page);
 		return ret;
 	}
 	do_set_pte(vma, address, fault_page, pte, false, false);
@@ -3105,7 +3105,7 @@ static int do_shared_fault(struct mm_str
 	if (unlikely(!pte_same(*pte, orig_pte))) {
 		pte_unmap_unlock(pte, ptl);
 		unlock_page(fault_page);
-		page_cache_release(fault_page);
+		put_page(fault_page);
 		return ret;
 	}
 	do_set_pte(vma, address, fault_page, pte, true, false);
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1541,7 +1541,7 @@ static int try_to_unmap_one(struct page
 
 discard:
 	page_remove_rmap(page, PageHuge(page));
-	page_cache_release(page);
+	put_page(page);
 
 out_unmap:
 	pte_unmap_unlock(pte, ptl);
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -300,7 +300,7 @@ static int shmem_add_to_page_cache(struc
 	VM_BUG_ON_PAGE(!PageLocked(page), page);
 	VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
 
-	page_cache_get(page);
+	get_page(page);
 	page->mapping = mapping;
 	page->index = index;
 
@@ -318,7 +318,7 @@ static int shmem_add_to_page_cache(struc
 	} else {
 		page->mapping = NULL;
 		spin_unlock_irq(&mapping->tree_lock);
-		page_cache_release(page);
+		put_page(page);
 	}
 	return error;
 }
@@ -530,7 +530,7 @@ static void shmem_undo_range(struct inod
 		struct page *page = NULL;
 		shmem_getpage(inode, start - 1, &page, SGP_READ, NULL);
 		if (page) {
-			unsigned int top = PAGE_CACHE_SIZE;
+			unsigned int top = PAGE_SIZE;
 			if (start > end) {
 				top = partial_end;
 				partial_end = 0;
@@ -1145,7 +1145,7 @@ static int shmem_getpage_gfp(struct inod
 	int once = 0;
 	int alloced = 0;
 
-	if (index > (MAX_LFS_FILESIZE >> PAGE_CACHE_SHIFT))
+	if (index > (MAX_LFS_FILESIZE >> PAGE_SHIFT))
 		return -EFBIG;
 repeat:
 	swap.val = 0;
@@ -1156,7 +1156,7 @@ repeat:
 	}
 
 	if (sgp != SGP_WRITE && sgp != SGP_FALLOC &&
-	    ((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
+	    ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
 		error = -EINVAL;
 		goto unlock;
 	}
@@ -1327,7 +1327,7 @@ clear:
 
 	/* Perhaps the file has been truncated since we checked */
 	if (sgp != SGP_WRITE && sgp != SGP_FALLOC &&
-	    ((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
+	    ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
 		if (alloced) {
 			ClearPageDirty(page);
 			delete_from_page_cache(page);
@@ -1355,7 +1355,7 @@ failed:
 unlock:
 	if (page) {
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 	}
 	if (error == -ENOSPC && !once++) {
 		info = SHMEM_I(inode);
@@ -1635,8 +1635,8 @@ static ssize_t shmem_file_read_iter(stru
 	if (!iter_is_iovec(to))
 		sgp = SGP_DIRTY;
 
-	index = *ppos >> PAGE_CACHE_SHIFT;
-	offset = *ppos & ~PAGE_CACHE_MASK;
+	index = *ppos >> PAGE_SHIFT;
+	offset = *ppos & ~PAGE_MASK;
 
 	for (;;) {
 		struct page *page = NULL;
