Message-ID: <CAGsJ_4xR949nPauJ4kZpcg3cRwvcrHHWU7oSazpfGR2Tv5XNTA@mail.gmail.com>
Date: Wed, 7 Aug 2024 04:44:44 +0800
From: Barry Song <21cnbao@...il.com>
To: David Hildenbrand <david@...hat.com>
Cc: akpm@...ux-foundation.org, linux-mm@...ck.org, 
	linux-kernel@...r.kernel.org, Barry Song <v-songbaohua@...o.com>, 
	Kairui Song <kasong@...cent.com>, Chris Li <chrisl@...nel.org>, 
	"Huang, Ying" <ying.huang@...el.com>, Hugh Dickins <hughd@...gle.com>, 
	Kalesh Singh <kaleshsingh@...gle.com>, Ryan Roberts <ryan.roberts@....com>
Subject: Re: [PATCH] mm: attempt to batch free swap entries for zap_pte_range()

On Tue, Aug 6, 2024 at 8:56 PM David Hildenbrand <david@...hat.com> wrote:
>
> On 06.08.24 03:24, Barry Song wrote:
> > From: Barry Song <v-songbaohua@...o.com>
> >
> > Zhiguo reported that swap release could be a serious bottleneck
> > during process exits[1]. With mTHP, we have the opportunity to
> > batch free swaps.
> > Thanks to the work of Chris and Kairui[2], I was able to achieve
> > this optimization with minimal code changes by building on their
> > efforts.
> > If swap_count is 1, which is likely the case since most anon memory
> > is private, we can free all the contiguous swap slots together.
> >
> > I ran the test program below to measure munmap bandwidth, using
> > zRAM as the swap device and 64KiB mTHP:
> >
> >   #include <sys/mman.h>
> >   #include <sys/time.h>
> >   #include <stdio.h>
> >   #include <stdlib.h>
> >
> >   unsigned long long tv_to_ms(struct timeval tv)
> >   {
> >          return tv.tv_sec * 1000 + tv.tv_usec / 1000;
> >   }
> >
> >   int main(void)
> >   {
> >          struct timeval tv_b, tv_e;
> >   #define SIZE (1024*1024*1024)
> >          void *p = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,
> >                                  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
> >          if (p == MAP_FAILED) {
> >                  perror("failed to get memory");
> >                  exit(-1);
> >          }
> >
> >          madvise(p, SIZE, MADV_HUGEPAGE);
> >          memset(p, 0x11, SIZE); /* write to get mem */
> >
> >          madvise(p, SIZE, MADV_PAGEOUT);
> >
> >          gettimeofday(&tv_b, NULL);
> >          munmap(p, SIZE);
> >          gettimeofday(&tv_e, NULL);
> >
> >          printf("munmap bandwidth: %llu bytes/ms\n",
> >                          SIZE/(tv_to_ms(tv_e) - tv_to_ms(tv_b)));
> >   }
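> >
> > (The test needs no special build flags. With zRAM configured as the
> > swap device, MADV_PAGEOUT pushes the whole 1GiB region out to swap,
> > so the timed munmap() has to release all of the swap entries.)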
> >
> > The results are as below (munmap bandwidth, in bytes/ms):
> >                  mm-unstable  mm-unstable-with-patch
> >     round1       21053761      63161283
> >     round2       21053761      63161283
> >     round3       21053761      63161283
> >     round4       20648881      67108864
> >     round5       20648881      67108864
> >
> > munmap bandwidth becomes about 3X higher.
> >
> > [1] https://lore.kernel.org/linux-mm/20240731133318.527-1-justinjiang@vivo.com/
> > [2] https://lore.kernel.org/linux-mm/20240730-swap-allocator-v5-0-cb9c148b9297@kernel.org/
> >
> > Cc: Kairui Song <kasong@...cent.com>
> > Cc: Chris Li <chrisl@...nel.org>
> > Cc: "Huang, Ying" <ying.huang@...el.com>
> > Cc: Hugh Dickins <hughd@...gle.com>
> > Cc: Kalesh Singh <kaleshsingh@...gle.com>
> > Cc: Ryan Roberts <ryan.roberts@....com>
> > Cc: David Hildenbrand <david@...hat.com>
> > Signed-off-by: Barry Song <v-songbaohua@...o.com>
> > ---
> >   mm/swapfile.c | 61 +++++++++++++++++++++++++++++++++++++++++++++++++++
> >   1 file changed, 61 insertions(+)
> >
> > diff --git a/mm/swapfile.c b/mm/swapfile.c
> > index ea023fc25d08..ed872a186e81 100644
> > --- a/mm/swapfile.c
> > +++ b/mm/swapfile.c
> > @@ -156,6 +156,25 @@ static bool swap_is_has_cache(struct swap_info_struct *si,
> >       return true;
> >   }
> >
> > +static bool swap_is_last_map(struct swap_info_struct *si,
> > +                           unsigned long offset, int nr_pages,
> > +                           bool *has_cache)
> > +{
> > +     unsigned char *map = si->swap_map + offset;
> > +     unsigned char *map_end = map + nr_pages;
> > +     bool cached = false;
> > +
> > +     do {
> > +             if ((*map & ~SWAP_HAS_CACHE) != 1)
> > +                     return false;
> > +             if (*map & SWAP_HAS_CACHE)
> > +                     cached = true;
> > +     } while (++map < map_end);
> > +
> > +     *has_cache = cached;
> > +     return true;
> > +}
> > +
> >   /*
> >    * returns number of pages in the folio that backs the swap entry. If positive,
> >    * the folio was reclaimed. If negative, the folio was not reclaimed. If 0, no
> > @@ -1469,6 +1488,39 @@ static unsigned char __swap_entry_free(struct swap_info_struct *p,
> >       return usage;
> >   }
> >
> > +static bool try_batch_swap_entries_free(struct swap_info_struct *p,
>
> Why call it "p" here and not "si" like in the other code you are touching?

That's because I found that the other _free_ functions all use "p":

static unsigned char __swap_entry_free(struct swap_info_struct *p,
      swp_entry_t entry)
{
...
}

static void swap_entry_free(struct swap_info_struct *p, swp_entry_t entry)
{
...
}

Sure, I can switch from "p" to "si".

>
> > +             swp_entry_t entry, int nr, bool *any_only_cache)
> > +{
> > +     unsigned long offset = swp_offset(entry);
> > +     struct swap_cluster_info *ci;
> > +     bool has_cache = false;
> > +     bool can_batch;
> > +     int i;
> > +
> > +     /* cross into another cluster */
> > +     if (nr > SWAPFILE_CLUSTER - offset % SWAPFILE_CLUSTER)
> > +             return false;
> > +     ci = lock_cluster_or_swap_info(p, offset);
> > +     can_batch = swap_is_last_map(p, offset, nr, &has_cache);
> > +     if (can_batch) {
> > +             for (i = 0; i < nr; i++)
> > +                     WRITE_ONCE(p->swap_map[offset + i], SWAP_HAS_CACHE);
> > +     }
> > +     unlock_cluster_or_swap_info(p, ci);
> > +
> > +     /* bail out unless every entry had count==1 (cached or not) */
> > +     if (!can_batch)
> > +             goto out;
> > +     if (!has_cache) {
> > +             spin_lock(&p->lock);
> > +             swap_entry_range_free(p, entry, nr);
> > +             spin_unlock(&p->lock);
> > +     }
> > +     *any_only_cache = has_cache;
> > +out:
> > +     return can_batch;
> > +}
> > +
> >   /*
> >    * Drop the last HAS_CACHE flag of swap entries, caller have to
> >    * ensure all entries belong to the same cgroup.
> > @@ -1797,6 +1849,7 @@ void free_swap_and_cache_nr(swp_entry_t entry, int nr)
> >       bool any_only_cache = false;
> >       unsigned long offset;
> >       unsigned char count;
> > +     bool batched;
> >
> >       if (non_swap_entry(entry))
> >               return;
> > @@ -1808,6 +1861,13 @@ void free_swap_and_cache_nr(swp_entry_t entry, int nr)
> >       if (WARN_ON(end_offset > si->max))
> >               goto out;
> >
> > +     if (nr > 1 && swap_count(data_race(si->swap_map[start_offset])) == 1) {
> > +             batched = try_batch_swap_entries_free(si, entry, nr,
> > +                                             &any_only_cache);
> > +             if (batched)
> > +                     goto reclaim;
> > +     }
> > +
>
> I'm wondering if we could find a way to clean this up, so that we end
> up with something like this here:
>
>
> if (WARN_ON(end_offset > si->max))
>         goto out;
>
> /*
>   * First free all entries in the range.
>   */
> any_only_cache = __free_swap_entries(si, entry, nr);
>
> /*
>   * Short-circuit the below loop if none of the entries had their
>   * reference drop to zero.
>   */
> if (!any_only_cache)
>         goto out;
>
> Whereby we'd move the fallback loop into that new function:
>
> static bool __free_swap_entries(struct swap_info_struct *si,
>                 swp_entry_t entry, int nr)
> {
>         const unsigned long start_offset = swp_offset(entry);
>         const unsigned long end_offset = start_offset + nr;
>         bool any_only_cache = false;
>
>         if (nr > 1 && swap_count(data_race(si->swap_map[start_offset])) == 1) {
>                 [... what try_batch_swap_entries_free() would do ...]
>         }
>
> fallback:
>         for (offset = start_offset; offset < end_offset; offset++) {
>                 if (data_race(si->swap_map[offset])) {
>                 [... what the fallback code would do ...]
>         }
>         return any_only_cache;
> }
>
>

Good suggestion. I will do this in v2; a rough sketch of what I have
in mind is below.
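
An untested sketch (naming is provisional, following your suggestion;
the fallback body reuses the existing per-entry loop from
free_swap_and_cache_nr()):

static bool __free_swap_entries(struct swap_info_struct *si,
		swp_entry_t entry, int nr)
{
	const unsigned long start_offset = swp_offset(entry);
	const unsigned long end_offset = start_offset + nr;
	bool any_only_cache = false;
	unsigned long offset;
	unsigned char count;

	/* try to batch-free the whole range when all counts are 1 */
	if (nr > 1 && swap_count(data_race(si->swap_map[start_offset])) == 1 &&
	    try_batch_swap_entries_free(si, entry, nr, &any_only_cache))
		return any_only_cache;

	/* fall back to freeing the entries one by one */
	for (offset = start_offset; offset < end_offset; offset++) {
		if (data_race(si->swap_map[offset])) {
			count = __swap_entry_free(si,
					swp_entry(swp_type(entry), offset));
			if (count == SWAP_HAS_CACHE)
				any_only_cache = true;
		}
	}

	return any_only_cache;
}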

> >       /*
> >        * First free all entries in the range.
> >        */
> > @@ -1821,6 +1881,7 @@ void free_swap_and_cache_nr(swp_entry_t entry, int nr)
> >               }
> >       }
> >
> > +reclaim:
> >       /*
> >        * Short-circuit the below loop if none of the entries had their
> >        * reference drop to zero.
>
> --
> Cheers,
>
> David / dhildenb
>

Thanks
Barry
