Date:   Fri, 12 May 2017 11:18:42 +0200
From:   Christian Borntraeger <borntraeger@...ibm.com>
To:     linux-mm@...ck.org,
        Linux Kernel Mailing List <linux-kernel@...r.kernel.org>
Subject: mm: page allocation failures in swap_duplicate ->
 add_swap_count_continuation

Folks,

recently I have seen page allocation failures in the paging code while
pages are being swapped out, e.g.:

May 05 21:36:53  kernel: Call Trace:
May 05 21:36:53  kernel: ([<0000000000112f62>] show_trace+0x62/0x78)
May 05 21:36:53  kernel:  [<0000000000113050>] show_stack+0x68/0xe0 
May 05 21:36:53  kernel:  [<00000000004fb97e>] dump_stack+0x7e/0xb0 
May 05 21:36:53  kernel:  [<0000000000299262>] warn_alloc+0xf2/0x190 
May 05 21:36:53  kernel:  [<000000000029a25a>] __alloc_pages_nodemask+0xeda/0xfe0 
May 05 21:36:53  kernel:  [<00000000002fa570>] alloc_pages_current+0xb8/0x170 
May 05 21:36:53  kernel:  [<00000000002f03fc>] add_swap_count_continuation+0x3c/0x280 
May 05 21:36:53  kernel:  [<00000000002f068c>] swap_duplicate+0x4c/0x80 
May 05 21:36:53  kernel:  [<00000000002dfbfa>] try_to_unmap_one+0x372/0x578 
May 05 21:36:53  kernel:  [<000000000030131a>] rmap_walk_ksm+0x14a/0x1d8 
May 05 21:36:53  kernel:  [<00000000002e0d60>] try_to_unmap+0x140/0x170 
May 05 21:36:53  kernel:  [<00000000002abc9c>] shrink_page_list+0x944/0xad8 
May 05 21:36:53  kernel:  [<00000000002ac720>] shrink_inactive_list+0x1e0/0x5b8 
May 05 21:36:53  kernel:  [<00000000002ad642>] shrink_node_memcg+0x5e2/0x800 
May 05 21:36:53  kernel:  [<00000000002ad954>] shrink_node+0xf4/0x360 
May 05 21:36:53  kernel:  [<00000000002aeb00>] kswapd+0x330/0x810 
May 05 21:36:53  kernel:  [<0000000000189f14>] kthread+0x144/0x168 
May 05 21:36:53  kernel:  [<00000000008011ea>] kernel_thread_starter+0x6/0xc 
May 05 21:36:53  kernel:  [<00000000008011e4>] kernel_thread_starter+0x0/0xc 

This seems to be new in 4.11, even though the relevant code does not appear
to have changed.
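
For reference, the allocation that triggers warn_alloc here is, if I read the
code correctly, the continuation page requested near the top of
add_swap_count_continuation() with the caller-supplied gfp_mask, roughly:

	/* mm/swapfile.c, add_swap_count_continuation(), excerpt from
	 * memory, details may differ */
	int add_swap_count_continuation(swp_entry_t entry, gfp_t gfp_mask)
	{
		struct page *page;
		[...]
		/* with GFP_ATOMIC from the reclaim path this allocation can
		 * fail under memory pressure and then warns */
		page = alloc_page(gfp_mask | __GFP_HIGHMEM);
		[...]
	}

so a fix has to either tolerate that failure silently or let the caller
decide what to do about it.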

Something like this 

diff --git a/mm/swapfile.c b/mm/swapfile.c
index 1781308..b2dd53e 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -3039,7 +3039,7 @@ int swap_duplicate(swp_entry_t entry)
        int err = 0;
 
        while (!err && __swap_duplicate(entry, 1) == -ENOMEM)
-               err = add_swap_count_continuation(entry, GFP_ATOMIC);
+               err = add_swap_count_continuation(entry, GFP_ATOMIC | __GFP_NOWARN);
        return err;
 }
 

does not seem appropriate, because this code does not know whether the caller
can handle the returned errors.

Would something like the following (whitespace-damaged cut'n'paste) be OK?
(The try_to_unmap_one change looks fine; I am not sure whether copy_one_pte
does the right thing; see the copy_pte_range excerpt after the patch.)

diff --git a/include/linux/swap.h b/include/linux/swap.h
index 45e91dd..4577494 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -391,7 +391,7 @@ extern swp_entry_t get_swap_page_of_type(int);
 extern int get_swap_pages(int n, swp_entry_t swp_entries[]);
 extern int add_swap_count_continuation(swp_entry_t, gfp_t);
 extern void swap_shmem_alloc(swp_entry_t);
-extern int swap_duplicate(swp_entry_t);
+extern int swap_duplicate(swp_entry_t, gfp_t);
 extern int swapcache_prepare(swp_entry_t);
 extern void swap_free(swp_entry_t);
 extern void swapcache_free(swp_entry_t);
@@ -447,7 +447,7 @@ static inline void swap_shmem_alloc(swp_entry_t swp)
 {
 }
 
-static inline int swap_duplicate(swp_entry_t swp)
+static inline int swap_duplicate(swp_entry_t entry, gfp_t gfp_mask)
 {
        return 0;
 }
diff --git a/mm/memory.c b/mm/memory.c
index 235ba51..3ae6f33 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -898,7 +898,7 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                swp_entry_t entry = pte_to_swp_entry(pte);
 
                if (likely(!non_swap_entry(entry))) {
-                       if (swap_duplicate(entry) < 0)
+                       if (swap_duplicate(entry, __GFP_NOWARN) < 0)
                                return entry.val;
 
                        /* make sure dst_mm is on swapoff's mmlist. */
diff --git a/mm/rmap.c b/mm/rmap.c
index f683801..777feb6 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1433,7 +1433,7 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                                goto discard;
                        }
 
-                       if (swap_duplicate(entry) < 0) {
+                       if (swap_duplicate(entry, __GFP_NOWARN) < 0) {
                                set_pte_at(mm, address, pvmw.pte, pteval);
                                ret = SWAP_FAIL;
                                page_vma_mapped_walk_done(&pvmw);
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 1781308..1f86268 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -3034,12 +3034,12 @@ void swap_shmem_alloc(swp_entry_t entry)
  * if __swap_duplicate() fails for another reason (-EINVAL or -ENOENT), which
  * might occur if a page table entry has got corrupted.
  */
-int swap_duplicate(swp_entry_t entry)
+int swap_duplicate(swp_entry_t entry, gfp_t gfp_mask)
 {
        int err = 0;
 
        while (!err && __swap_duplicate(entry, 1) == -ENOMEM)
-               err = add_swap_count_continuation(entry, GFP_ATOMIC);
+               err = add_swap_count_continuation(entry, GFP_ATOMIC | gfp_mask);
        return err;
 }
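
On the copy_one_pte question: if I read mm/memory.c correctly, the error is
handled one level up in copy_pte_range(), which retries the continuation with
GFP_KERNEL, roughly like this (excerpt from memory, may differ in detail):

	/* mm/memory.c, copy_pte_range(), excerpt */
	entry.val = copy_one_pte(dst_mm, src_mm, dst_pte, src_pte,
					vma, addr, rss);
	if (entry.val)
		break;
	[...]
	if (entry.val) {
		/* sleeping retry; this one may still warn on failure */
		if (add_swap_count_continuation(entry, GFP_KERNEL) < 0)
			return -ENOMEM;
		progress = 0;
	}
	if (addr != end)
		goto again;

If that is right, the __GFP_NOWARN in copy_one_pte would only silence the
opportunistic GFP_ATOMIC attempt, and a failure of the GFP_KERNEL retry
would still be reported.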

