Message-ID: <CAJuCfpH7cEuwobS3c=O0MzKtZi+6d2yK++YgSp7GViWPiJsgCg@mail.gmail.com>
Date:   Fri, 28 Feb 2020 14:20:27 -0800
From:   Suren Baghdasaryan <surenb@...gle.com>
To:     Minchan Kim <minchan@...nel.org>
Cc:     Andrew Morton <akpm@...ux-foundation.org>,
        LKML <linux-kernel@...r.kernel.org>,
        linux-mm <linux-mm@...ck.org>, linux-api@...r.kernel.org,
        oleksandr@...hat.com, Tim Murray <timmurray@...gle.com>,
        Daniel Colascione <dancol@...gle.com>,
        Sandeep Patil <sspatil@...gle.com>,
        Sonny Rao <sonnyrao@...gle.com>,
        Brian Geffon <bgeffon@...gle.com>,
        Michal Hocko <mhocko@...e.com>,
        Johannes Weiner <hannes@...xchg.org>,
        Shakeel Butt <shakeelb@...gle.com>,
        John Dias <joaodias@...gle.com>,
        Joel Fernandes <joel@...lfernandes.org>, sj38.park@...il.com,
        alexander.h.duyck@...ux.intel.com, Jann Horn <jannh@...gle.com>
Subject: Re: [PATCH v6 3/7] mm: check fatal signal pending of target process

On Tue, Feb 18, 2020 at 5:44 PM Minchan Kim <minchan@...nel.org> wrote:
>
> Bail out to avoid unnecessary CPU overhead if the target process has a
> pending fatal signal during an (MADV_COLD|MADV_PAGEOUT) operation.
>
> Signed-off-by: Minchan Kim <minchan@...nel.org>
> ---
>  mm/madvise.c | 29 +++++++++++++++++++++--------
>  1 file changed, 21 insertions(+), 8 deletions(-)
>
> diff --git a/mm/madvise.c b/mm/madvise.c
> index f29155b8185d..def1507c2030 100644
> --- a/mm/madvise.c
> +++ b/mm/madvise.c
> @@ -36,6 +36,7 @@
>  struct madvise_walk_private {
>         struct mmu_gather *tlb;
>         bool pageout;
> +       struct task_struct *target_task;
>  };
>
>  /*
> @@ -316,6 +317,10 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
>         if (fatal_signal_pending(current))
>                 return -EINTR;
>
> +       if (private->target_task &&
> +                       fatal_signal_pending(private->target_task))
> +               return -EINTR;
> +
>  #ifdef CONFIG_TRANSPARENT_HUGEPAGE
>         if (pmd_trans_huge(*pmd)) {
>                 pmd_t orig_pmd;
> @@ -471,12 +476,14 @@ static const struct mm_walk_ops cold_walk_ops = {
>  };
>
>  static void madvise_cold_page_range(struct mmu_gather *tlb,
> +                            struct task_struct *task,
>                              struct vm_area_struct *vma,
>                              unsigned long addr, unsigned long end)
>  {
>         struct madvise_walk_private walk_private = {
>                 .pageout = false,
>                 .tlb = tlb,
> +               .target_task = task,
>         };
>
>         tlb_start_vma(tlb, vma);
> @@ -484,7 +491,8 @@ static void madvise_cold_page_range(struct mmu_gather *tlb,
>         tlb_end_vma(tlb, vma);
>  }
>
> -static long madvise_cold(struct vm_area_struct *vma,
> +static long madvise_cold(struct task_struct *task,
> +                       struct vm_area_struct *vma,
>                         struct vm_area_struct **prev,
>                         unsigned long start_addr, unsigned long end_addr)
>  {
> @@ -497,19 +505,21 @@ static long madvise_cold(struct vm_area_struct *vma,
>
>         lru_add_drain();
>         tlb_gather_mmu(&tlb, mm, start_addr, end_addr);
> -       madvise_cold_page_range(&tlb, vma, start_addr, end_addr);
> +       madvise_cold_page_range(&tlb, task, vma, start_addr, end_addr);
>         tlb_finish_mmu(&tlb, start_addr, end_addr);
>
>         return 0;
>  }
>
>  static void madvise_pageout_page_range(struct mmu_gather *tlb,
> +                            struct task_struct *task,
>                              struct vm_area_struct *vma,
>                              unsigned long addr, unsigned long end)
>  {
>         struct madvise_walk_private walk_private = {
>                 .pageout = true,
>                 .tlb = tlb,
> +               .target_task = task,
>         };
>
>         tlb_start_vma(tlb, vma);
> @@ -533,7 +543,8 @@ static inline bool can_do_pageout(struct vm_area_struct *vma)
>                 inode_permission(file_inode(vma->vm_file), MAY_WRITE) == 0;
>  }
>
> -static long madvise_pageout(struct vm_area_struct *vma,
> +static long madvise_pageout(struct task_struct *task,
> +                       struct vm_area_struct *vma,
>                         struct vm_area_struct **prev,
>                         unsigned long start_addr, unsigned long end_addr)
>  {
> @@ -549,7 +560,7 @@ static long madvise_pageout(struct vm_area_struct *vma,
>
>         lru_add_drain();
>         tlb_gather_mmu(&tlb, mm, start_addr, end_addr);
> -       madvise_pageout_page_range(&tlb, vma, start_addr, end_addr);
> +       madvise_pageout_page_range(&tlb, task, vma, start_addr, end_addr);
>         tlb_finish_mmu(&tlb, start_addr, end_addr);
>
>         return 0;
> @@ -929,7 +940,8 @@ static int madvise_inject_error(int behavior,
>  #endif
>
>  static long
> -madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev,
> +madvise_vma(struct task_struct *task, struct vm_area_struct *vma,
> +               struct vm_area_struct **prev,
>                 unsigned long start, unsigned long end, int behavior)
>  {
>         switch (behavior) {
> @@ -938,9 +950,9 @@ madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev,
>         case MADV_WILLNEED:
>                 return madvise_willneed(vma, prev, start, end);
>         case MADV_COLD:
> -               return madvise_cold(vma, prev, start, end);
> +               return madvise_cold(task, vma, prev, start, end);
>         case MADV_PAGEOUT:
> -               return madvise_pageout(vma, prev, start, end);
> +               return madvise_pageout(task, vma, prev, start, end);
>         case MADV_FREE:
>         case MADV_DONTNEED:
>                 return madvise_dontneed_free(vma, prev, start, end, behavior);
> @@ -1140,7 +1152,8 @@ int do_madvise(struct task_struct *target_task, struct mm_struct *mm,
>                         tmp = end;
>
>                 /* Here vma->vm_start <= start < tmp <= (end|vma->vm_end). */
> -               error = madvise_vma(vma, &prev, start, tmp, behavior);
> +               error = madvise_vma(target_task, vma, &prev,
> +                                       start, tmp, behavior);
>                 if (error)
>                         goto out;
>                 start = tmp;
> --
> 2.25.0.265.gbab2e86ba0-goog
>

Reviewed-by: Suren Baghdasaryan <surenb@...gle.com>
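
For anyone following along, a minimal user-space sketch of the kind of
caller that exercises this path: an external process issuing MADV_PAGEOUT
against another task's memory via pidfd_open() + process_madvise(). The
syscall numbers and the iovec-based signature below are an illustration of
one possible shape of that interface, not necessarily the exact ABI
proposed in this series. The point is only that the target can be killed
while the hinting walk is still running, which is exactly the situation the
new fatal_signal_pending(target_task) check bails out on with -EINTR.

/*
 * Illustrative only: drive MADV_PAGEOUT on another process's range.
 * Syscall numbers and the iovec-based calling convention are assumptions
 * for the purpose of this sketch, not the authoritative ABI of this series.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <unistd.h>

#ifndef __NR_pidfd_open
#define __NR_pidfd_open 434
#endif
#ifndef __NR_process_madvise
#define __NR_process_madvise 440
#endif
#ifndef MADV_PAGEOUT
#define MADV_PAGEOUT 21
#endif

int main(int argc, char **argv)
{
	if (argc != 4) {
		fprintf(stderr, "usage: %s <pid> <addr> <len>\n", argv[0]);
		return 1;
	}

	pid_t pid = (pid_t)atoi(argv[1]);
	struct iovec iov = {
		.iov_base = (void *)strtoul(argv[2], NULL, 0),
		.iov_len  = strtoul(argv[3], NULL, 0),
	};

	int pidfd = syscall(__NR_pidfd_open, pid, 0);
	if (pidfd < 0) {
		perror("pidfd_open");
		return 1;
	}

	/*
	 * If the target receives SIGKILL while the kernel is still walking
	 * its page tables, the madvise_cold_or_pageout_pte_range() check
	 * added by this patch lets the walk return early with -EINTR
	 * instead of continuing to reclaim pages for a dying process.
	 */
	ssize_t ret = syscall(__NR_process_madvise, pidfd, &iov, 1,
			      MADV_PAGEOUT, 0);
	if (ret < 0)
		perror("process_madvise");
	else
		printf("advised %zd bytes\n", ret);

	close(pidfd);
	return ret < 0 ? 1 : 0;
}

Without the target-task check, only a fatal signal to the *caller* would
stop the walk; with it, killing either side terminates the operation
promptly, which seems like the right trade-off for a long page-table walk
done on someone else's behalf.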
