Message-ID: <CAJD7tkakVisOAGn6gOTHovBz4=29D2qdzT8dBPgrPNB6Lb6mTg@mail.gmail.com>
Date: Fri, 6 Sep 2024 15:55:52 -0700
From: Yosry Ahmed <yosryahmed@...gle.com>
To: Barry Song <21cnbao@...il.com>
Cc: akpm@...ux-foundation.org, baolin.wang@...ux.alibaba.com,
	chrisl@...nel.org, david@...hat.com, hanchuanhua@...o.com,
	hannes@...xchg.org, hch@...radead.org, hughd@...gle.com,
	kaleshsingh@...gle.com, kasong@...cent.com,
	linux-kernel@...r.kernel.org, linux-mm@...ck.org,
	mhocko@...e.com, minchan@...nel.org, nphamcs@...il.com,
	ryan.roberts@....com, ryncsn@...il.com, senozhatsky@...omium.org,
	shakeel.butt@...ux.dev, shy828301@...il.com, surenb@...gle.com,
	usamaarif642@...il.com, v-songbaohua@...o.com, willy@...radead.org,
	xiang@...nel.org, ying.huang@...el.com
Subject: Re: [PATCH v8 1/3] mm: Fix swap_read_folio_zeromap() for large
 folios with partial zeromap

[..]
> Yep :-) with the above two changes, the patch becomes:
>
> From 272c04cb758b8062eaa96a52b855ff79c8afdf6a Mon Sep 17 00:00:00 2001
> From: Barry Song <v-songbaohua@...o.com>
> Date: Thu, 5 Sep 2024 11:56:03 +1200
> Subject: [PATCH v8 1/3] mm: Fix swap_read_folio_zeromap() for large folios
>  with partial zeromap
>
> There could be a corner case where the first entry is non-zeromap,
> but a subsequent entry is zeromap. In this case, we should not let
> swap_read_folio_zeromap() return false, since we would still read
> corrupted data.
>
> Additionally, the iteration of test_bit() is unnecessary and can be
> replaced with bitmap operations, which are more efficient.
>
> We can adopt the style of swap_pte_batch() and folio_pte_batch() to
> introduce swap_zeromap_batch(), which seems to provide the greatest
> flexibility for the caller. This approach allows the caller either to
> check whether the zeromap status of all entries is consistent, or to
> determine the number of contiguous entries with the same status.
>
> Since swap_read_folio() can't handle reading a large folio that's
> partially zeromap and partially non-zeromap, we've moved the code
> to mm/swap.h so that others, like those working on swap-in, can
> access it.
>
> Fixes: 0ca0c24e3211 ("mm: store zero pages to be swapped out in a bitmap")
> Cc: Usama Arif <usamaarif642@...il.com>
> Cc: Yosry Ahmed <yosryahmed@...gle.com>
> Signed-off-by: Barry Song <v-songbaohua@...o.com>

Reviewed-by: Yosry Ahmed <yosryahmed@...gle.com>

> ---
>  mm/page_io.c | 32 +++++++-------------------------
>  mm/swap.h    | 33 +++++++++++++++++++++++++++++++++
>  2 files changed, 40 insertions(+), 25 deletions(-)
>
> diff --git a/mm/page_io.c b/mm/page_io.c
> index 4bc77d1c6bfa..bc1183299a7d 100644
> --- a/mm/page_io.c
> +++ b/mm/page_io.c
> @@ -226,26 +226,6 @@ static void swap_zeromap_folio_clear(struct folio *folio)
>  	}
>  }
>  
> -/*
> - * Return the index of the first subpage which is not zero-filled
> - * according to swap_info_struct->zeromap.
> - * If all pages are zero-filled according to zeromap, it will return
> - * folio_nr_pages(folio).
> - */
> -static unsigned int swap_zeromap_folio_test(struct folio *folio)
> -{
> -	struct swap_info_struct *sis = swp_swap_info(folio->swap);
> -	swp_entry_t entry;
> -	unsigned int i;
> -
> -	for (i = 0; i < folio_nr_pages(folio); i++) {
> -		entry = page_swap_entry(folio_page(folio, i));
> -		if (!test_bit(swp_offset(entry), sis->zeromap))
> -			return i;
> -	}
> -	return i;
> -}
> -
>  /*
>   * We may have stale swap cache pages in memory: notice
>   * them here and get rid of the unnecessary final write.
> @@ -524,19 +504,21 @@ static void sio_read_complete(struct kiocb *iocb, long ret)
>  
>  static bool swap_read_folio_zeromap(struct folio *folio)
>  {
> -	unsigned int idx = swap_zeromap_folio_test(folio);
> -
> -	if (idx == 0)
> -		return false;
> +	int nr_pages = folio_nr_pages(folio);
> +	bool is_zeromap;
>  
>  	/*
>  	 * Swapping in a large folio that is partially in the zeromap is not
>  	 * currently handled. Return true without marking the folio uptodate so
>  	 * that an IO error is emitted (e.g. do_swap_page() will sigbus).
>  	 */
> -	if (WARN_ON_ONCE(idx < folio_nr_pages(folio)))
> +	if (WARN_ON_ONCE(swap_zeromap_batch(folio->swap, nr_pages,
> +			&is_zeromap) != nr_pages))
>  		return true;
>  
> +	if (!is_zeromap)
> +		return false;
> +
>  	folio_zero_range(folio, 0, folio_size(folio));
>  	folio_mark_uptodate(folio);
>  	return true;
> diff --git a/mm/swap.h b/mm/swap.h
> index f8711ff82f84..e0397a197620 100644
> --- a/mm/swap.h
> +++ b/mm/swap.h
> @@ -80,6 +80,32 @@ static inline unsigned int folio_swap_flags(struct folio *folio)
>  {
>  	return swp_swap_info(folio->swap)->flags;
>  }
> +
> +/*
> + * Return the count of contiguous swap entries that share the same
> + * zeromap status as the starting entry. If is_zeromap is not NULL,
> + * it will return the zeromap status of the starting entry.
> + */
> +static inline int swap_zeromap_batch(swp_entry_t entry, int max_nr,
> +		bool *is_zeromap)
> +{
> +	struct swap_info_struct *sis = swp_swap_info(entry);
> +	unsigned long start = swp_offset(entry);
> +	unsigned long end = start + max_nr;
> +	bool first_bit;
> +
> +	first_bit = test_bit(start, sis->zeromap);
> +	if (is_zeromap)
> +		*is_zeromap = first_bit;
> +
> +	if (max_nr <= 1)
> +		return max_nr;
> +	if (first_bit)
> +		return find_next_zero_bit(sis->zeromap, end, start) - start;
> +	else
> +		return find_next_bit(sis->zeromap, end, start) - start;
> +}
> +
>  #else /* CONFIG_SWAP */
>  struct swap_iocb;
>  static inline void swap_read_folio(struct folio *folio, struct swap_iocb **plug)
> @@ -171,6 +197,13 @@ static inline unsigned int folio_swap_flags(struct folio *folio)
>  {
>  	return 0;
>  }
> +
> +static inline int swap_zeromap_batch(swp_entry_t entry, int max_nr,
> +		bool *has_zeromap)
> +{
> +	return 0;
> +}
> +
>  #endif /* CONFIG_SWAP */
>  
>  #endif /* _MM_SWAP_H */
> --
> 2.34.1
>
> Thanks
> Barry
>
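For readers skimming the archive, the run-length semantics of
swap_zeromap_batch() and the corner case described in the changelog can
be demonstrated with a small standalone userspace sketch. This is
illustrative only: the zeromap_batch() helper and the bool-per-slot
array below are made-up stand-ins, and the plain loop stands in for the
find_next_bit()/find_next_zero_bit() bitmap operations the kernel
helper actually uses on sis->zeromap.

#include <stdbool.h>
#include <stdio.h>

#define NR_SLOTS 8

/* Stand-in for sis->zeromap: one flag per swap slot, set when the
 * slot holds a zero-filled page. */
static const bool zeromap[NR_SLOTS] = {
	false, true, true, true,	/* slot 0 non-zero; slots 1-3 zero-filled */
	false, false, true, false,
};

/* Count contiguous slots from 'start' that share the starting slot's
 * zeromap status; report that status through *is_zeromap. */
static int zeromap_batch(int start, int max_nr, bool *is_zeromap)
{
	bool first_bit = zeromap[start];
	int i;

	if (is_zeromap)
		*is_zeromap = first_bit;

	if (max_nr <= 1)
		return max_nr;

	/* The kernel computes this run length with bitmap operations. */
	for (i = 1; i < max_nr; i++)
		if (zeromap[start + i] != first_bit)
			break;
	return i;
}

int main(void)
{
	bool is_zeromap;

	/* The corner case from the changelog: a 4-page folio at slot 0
	 * is only partially in the zeromap. The batch (1) differs from
	 * nr_pages (4), so swap_read_folio_zeromap() must not return
	 * false; reading the folio as ordinary IO would produce
	 * corrupted data for slots 1-3. */
	printf("batch=%d is_zeromap=%d\n",
	       zeromap_batch(0, 4, &is_zeromap), is_zeromap);	/* 1, 0 */

	/* A consistent batch: slots 1-3 are all zero-filled. */
	printf("batch=%d is_zeromap=%d\n",
	       zeromap_batch(1, 3, &is_zeromap), is_zeromap);	/* 3, 1 */
	return 0;
}

Returning a run length rather than a plain boolean is what lets
swap_read_folio_zeromap() detect the mixed case with a single
comparison against nr_pages, while the swap-in work mentioned in the
changelog can reuse the same helper to find the boundary of a
consistent batch.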