Message-ID: <20190211125337.16099-2-chintan.pandya@oneplus.com>
Date: Mon, 11 Feb 2019 12:53:53 +0000
From: Chintan Pandya <chintan.pandya@...plus.com>
To: Linux Upstream <linux.upstream@...plus.com>,
"hughd@...gle.com" <hughd@...gle.com>,
"peterz@...radead.org" <peterz@...radead.org>,
"jack@...e.cz" <jack@...e.cz>,
"mawilcox@...rosoft.com" <mawilcox@...rosoft.com>,
"akpm@...ux-foundation.org" <akpm@...ux-foundation.org>
CC: "linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>,
"linux-mm@...ck.org" <linux-mm@...ck.org>,
Chintan Pandya <chintan.pandya@...plus.com>
Subject: [RFC 1/2] page-flags: Make page lock operation atomic
Currently, the page lock operation is non-atomic, which leaves a
window for race conditions. For example, if two threads update the
flags of the same page concurrently, the page lock bit (PG_locked)
set by one thread can be overwritten by the other thread's
read-modify-write, leaving the page unlocked. That causes trouble
later, when code expects the page to be locked but it is not.
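
To spell out the interleaving (an illustrative userspace sketch, not
code from this patch; the flag bit numbers are stand-ins):

    enum { PG_locked = 0, PG_dirty = 4 };  /* stand-in bit numbers */
    unsigned long flags = 0;

    unsigned long a = flags;               /* thread A reads 0        */
    unsigned long b = flags;               /* thread B also reads 0   */
    flags = a | (1UL << PG_locked);        /* A: PG_locked now set    */
    flags = b | (1UL << PG_dirty);         /* B: stale RMW, PG_locked
                                              silently lost           */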

Make the page lock/unlock operations use the atomic version of the
set_bit API. There are other flag set operations which still use the
non-atomic version of set_bit; but that might be a change for the
future.
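
For reference, the difference between the two bitop flavours can be
sketched in userspace with compiler builtins (an analogy only; the
kernel's set_bit() is implemented per architecture, e.g. as a locked
instruction on x86, and the sketch_* names below are hypothetical):

    /* Non-atomic: a plain read-modify-write; the caller must
     * guarantee nobody else touches the word concurrently. */
    static void sketch_nonatomic_set_bit(long nr, unsigned long *addr)
    {
        *addr |= 1UL << nr;
    }

    /* Atomic: the OR is one indivisible read-modify-write, so a
     * concurrent update of another bit in the word cannot be lost. */
    static void sketch_atomic_set_bit(long nr, unsigned long *addr)
    {
        __atomic_fetch_or(addr, 1UL << nr, __ATOMIC_RELAXED);
    }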
Change-Id: I13bdbedc2b198af014d885e1925c93b83ed6660e
Signed-off-by: Chintan Pandya <chintan.pandya@...plus.com>
---
fs/cifs/file.c | 8 ++++----
fs/pipe.c | 2 +-
include/linux/page-flags.h | 2 +-
include/linux/pagemap.h | 6 +++---
mm/filemap.c | 4 ++--
mm/khugepaged.c | 2 +-
mm/ksm.c | 2 +-
mm/memory-failure.c | 2 +-
mm/memory.c | 2 +-
mm/migrate.c | 2 +-
mm/shmem.c | 6 +++---
mm/swap_state.c | 4 ++--
mm/vmscan.c | 2 +-
13 files changed, 22 insertions(+), 22 deletions(-)
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 7d6539a04fac..23bcdee37239 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -3661,13 +3661,13 @@ readpages_get_pages(struct address_space *mapping, struct list_head *page_list,
* should have access to this page, we're safe to simply set
* PG_locked without checking it first.
*/
- __SetPageLocked(page);
+ SetPageLocked(page);
rc = add_to_page_cache_locked(page, mapping,
page->index, gfp);
/* give up if we can't stick it in the cache */
if (rc) {
- __ClearPageLocked(page);
+ ClearPageLocked(page);
return rc;
}
@@ -3688,9 +3688,9 @@ readpages_get_pages(struct address_space *mapping, struct list_head *page_list,
if (*bytes + PAGE_SIZE > rsize)
break;
- __SetPageLocked(page);
+ SetPageLocked(page);
if (add_to_page_cache_locked(page, mapping, page->index, gfp)) {
- __ClearPageLocked(page);
+ ClearPageLocked(page);
break;
}
list_move_tail(&page->lru, tmplist);
diff --git a/fs/pipe.c b/fs/pipe.c
index 8ef7d7bef775..1bab40a2ca44 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -147,7 +147,7 @@ static int anon_pipe_buf_steal(struct pipe_inode_info *pipe,
if (page_count(page) == 1) {
if (memcg_kmem_enabled())
memcg_kmem_uncharge(page, 0);
- __SetPageLocked(page);
+ SetPageLocked(page);
return 0;
}
return 1;
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 5af67406b9c9..a56a9bd4bc6b 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -268,7 +268,7 @@ static inline int TestClearPage##uname(struct page *page) { return 0; }
#define TESTSCFLAG_FALSE(uname) \
TESTSETFLAG_FALSE(uname) TESTCLEARFLAG_FALSE(uname)
-__PAGEFLAG(Locked, locked, PF_NO_TAIL)
+PAGEFLAG(Locked, locked, PF_NO_TAIL)
PAGEFLAG(Waiters, waiters, PF_ONLY_HEAD) __CLEARPAGEFLAG(Waiters, waiters, PF_ONLY_HEAD)
PAGEFLAG(Error, error, PF_NO_COMPOUND) TESTCLEARFLAG(Error, error, PF_NO_COMPOUND)
PAGEFLAG(Referenced, referenced, PF_HEAD)
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 51a9a0af3281..87a0447cfbe0 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -619,17 +619,17 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);
/*
* Like add_to_page_cache_locked, but used to add newly allocated pages:
- * the page is new, so we can just run __SetPageLocked() against it.
+ * the page is new, so we can just run SetPageLocked() against it.
*/
static inline int add_to_page_cache(struct page *page,
struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
{
int error;
- __SetPageLocked(page);
+ SetPageLocked(page);
error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
if (unlikely(error))
- __ClearPageLocked(page);
+ ClearPageLocked(page);
return error;
}
diff --git a/mm/filemap.c b/mm/filemap.c
index 8e09304af1ec..14284726cf3a 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -807,11 +807,11 @@ int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
void *shadow = NULL;
int ret;
- __SetPageLocked(page);
+ SetPageLocked(page);
ret = __add_to_page_cache_locked(page, mapping, offset,
gfp_mask, &shadow);
if (unlikely(ret))
- __ClearPageLocked(page);
+ ClearPageLocked(page);
else {
/*
* The page might have been evicted from cache only
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index aaae33402d61..2e8f5bfa066d 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1341,7 +1341,7 @@ static void collapse_shmem(struct mm_struct *mm,
new_page->index = start;
new_page->mapping = mapping;
__SetPageSwapBacked(new_page);
- __SetPageLocked(new_page);
+ SetPageLocked(new_page);
BUG_ON(!page_ref_freeze(new_page, 1));
diff --git a/mm/ksm.c b/mm/ksm.c
index 31e6420c209b..115091798a6d 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -2531,7 +2531,7 @@ struct page *ksm_might_need_to_copy(struct page *page,
SetPageDirty(new_page);
__SetPageUptodate(new_page);
- __SetPageLocked(new_page);
+ SetPageLocked(new_page);
}
return new_page;
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 42d8fa64cebc..1a7c31b7d7e3 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1209,7 +1209,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
/*
* We ignore non-LRU pages for good reasons.
* - PG_locked is only well defined for LRU pages and a few others
- * - to avoid races with __SetPageLocked()
+ * - to avoid races with SetPageLocked()
* - to avoid races with __SetPageSlab*() (and more non-atomic ops)
* The check (unnecessarily) ignores LRU pages being isolated and
* walked by the page reclaim code, however that's not a big loss.
diff --git a/mm/memory.c b/mm/memory.c
index 8b9e5dd20d0c..9d7b107025e7 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3102,7 +3102,7 @@ int do_swap_page(struct vm_fault *vmf)
page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma,
vmf->address);
if (page) {
- __SetPageLocked(page);
+ SetPageLocked(page);
__SetPageSwapBacked(page);
set_page_private(page, entry.val);
lru_cache_add_anon(page);
diff --git a/mm/migrate.c b/mm/migrate.c
index 12d821ff8401..1b9ed5ca5e8e 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -2037,7 +2037,7 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
}
/* Prepare a page as a migration target */
- __SetPageLocked(new_page);
+ SetPageLocked(new_page);
if (PageSwapBacked(page))
__SetPageSwapBacked(new_page);
diff --git a/mm/shmem.c b/mm/shmem.c
index 8c8af1440184..3305312c7557 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1501,7 +1501,7 @@ static struct page *shmem_alloc_and_acct_page(gfp_t gfp,
else
page = shmem_alloc_page(gfp, info, index);
if (page) {
- __SetPageLocked(page);
+ SetPageLocked(page);
__SetPageSwapBacked(page);
return page;
}
@@ -1554,7 +1554,7 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp,
copy_highpage(newpage, oldpage);
flush_dcache_page(newpage);
- __SetPageLocked(newpage);
+ SetPageLocked(newpage);
__SetPageSwapBacked(newpage);
SetPageUptodate(newpage);
set_page_private(newpage, swap_index);
@@ -2277,7 +2277,7 @@ static int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
}
VM_BUG_ON(PageLocked(page) || PageSwapBacked(page));
- __SetPageLocked(page);
+ SetPageLocked(page);
__SetPageSwapBacked(page);
__SetPageUptodate(page);
diff --git a/mm/swap_state.c b/mm/swap_state.c
index bec3d214084b..caa652f1d8a6 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -480,7 +480,7 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
}
/* May fail (-ENOMEM) if radix-tree node allocation failed. */
- __SetPageLocked(new_page);
+ SetPageLocked(new_page);
__SetPageSwapBacked(new_page);
err = __add_to_swap_cache(new_page, entry);
if (likely(!err)) {
@@ -498,7 +498,7 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
return new_page;
}
radix_tree_preload_end();
- __ClearPageLocked(new_page);
+ ClearPageLocked(new_page);
/*
* add_to_swap_cache() doesn't return -EEXIST, so we can safely
* clear SWAP_HAS_CACHE flag.
diff --git a/mm/vmscan.c b/mm/vmscan.c
index ead2c52008fa..c01aa130b9ba 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1577,7 +1577,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
* we obviously don't have to worry about waking up a process
* waiting on the page lock, because there are no references.
*/
- __ClearPageLocked(page);
+ ClearPageLocked(page);
free_it:
nr_reclaimed++;
--
2.17.1