lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20250113005626.2290-1-21cnbao@gmail.com>
Date: Mon, 13 Jan 2025 13:56:26 +1300
From: Barry Song <21cnbao@...il.com>
To: lkp@...el.com
Cc: 21cnbao@...il.com,
	akpm@...ux-foundation.org,
	anshuman.khandual@....com,
	baolin.wang@...ux.alibaba.com,
	bp@...en8.de,
	catalin.marinas@....com,
	chrisl@...nel.org,
	dave.hansen@...ux.intel.com,
	david@...hat.com,
	gshan@...hat.com,
	hpa@...or.com,
	ioworker0@...il.com,
	kasong@...cent.com,
	kirill.shutemov@...ux.intel.com,
	linux-arm-kernel@...ts.infradead.org,
	linux-kernel@...r.kernel.org,
	linux-mm@...ck.org,
	mark.rutland@....com,
	mingo@...hat.com,
	oe-kbuild-all@...ts.linux.dev,
	ryan.roberts@....com,
	shahuang@...hat.com,
	tglx@...utronix.de,
	v-songbaohua@...o.com,
	wangkefeng.wang@...wei.com,
	will@...nel.org,
	x86@...nel.org,
	ying.huang@...el.com,
	yosryahmed@...gle.com,
	zhengtangquan@...o.com
Subject: Re: [PATCH 2/3] mm: Support tlbbatch flush for a range of PTEs

On Mon, Jan 6, 2025 at 11:08 PM kernel test robot <lkp@...el.com> wrote:
>
> Hi Barry,
>
> kernel test robot noticed the following build errors:
>
> [auto build test ERROR on akpm-mm/mm-everything]
>
> url:    https://github.com/intel-lab-lkp/linux/commits/Barry-Song/mm-set-folio-swapbacked-iff-folios-are-dirty-in-try_to_unmap_one/20250106-112638
> base:   https://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm.git mm-everything
> patch link:    https://lore.kernel.org/r/20250106031711.82855-3-21cnbao%40gmail.com
> patch subject: [PATCH 2/3] mm: Support tlbbatch flush for a range of PTEs
> config: riscv-randconfig-001-20250106 (https://download.01.org/0day-ci/archive/20250106/202501061736.FoHcInHJ-lkp@intel.com/config)
> compiler: riscv64-linux-gcc (GCC) 14.2.0
> reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20250106/202501061736.FoHcInHJ-lkp@intel.com/reproduce)
>
> If you fix the issue in a separate patch/commit (i.e. not just a new version of
> the same patch/commit), kindly add following tags
> | Reported-by: kernel test robot <lkp@...el.com>
> | Closes: https://lore.kernel.org/oe-kbuild-all/202501061736.FoHcInHJ-lkp@intel.com/
>

Sorry, my bad — does the diff below fix the build?

diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h
index 72e559934952..7f3ea687ce33 100644
--- a/arch/riscv/include/asm/tlbflush.h
+++ b/arch/riscv/include/asm/tlbflush.h
@@ -61,7 +61,8 @@ void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
 bool arch_tlbbatch_should_defer(struct mm_struct *mm);
 void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
 			       struct mm_struct *mm,
-			       unsigned long uaddr);
+			       unsigned long uaddr,
+			       unsigned long size);
 void arch_flush_tlb_batched_pending(struct mm_struct *mm);
 void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);
 
diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c
index 9b6e86ce3867..aeda64a36d50 100644
--- a/arch/riscv/mm/tlbflush.c
+++ b/arch/riscv/mm/tlbflush.c
@@ -187,7 +187,8 @@ bool arch_tlbbatch_should_defer(struct mm_struct *mm)
 
 void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
 			       struct mm_struct *mm,
-			       unsigned long uaddr)
+			       unsigned long uaddr,
+			       unsigned long size)
 {
 	cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
 }

> All errors (new ones prefixed by >>):
>
>    mm/rmap.c: In function 'set_tlb_ubc_flush_pending':
> >> mm/rmap.c:685:9: error: too many arguments to function 'arch_tlbbatch_add_pending'
>      685 |         arch_tlbbatch_add_pending(&tlb_ubc->arch, mm, uaddr, size);
>          |         ^~~~~~~~~~~~~~~~~~~~~~~~~
>    In file included from arch/riscv/include/asm/pgtable.h:113,
>                     from include/linux/pgtable.h:6,
>                     from include/linux/mm.h:30,
>                     from mm/rmap.c:55:
>    arch/riscv/include/asm/tlbflush.h:62:6: note: declared here
>       62 | void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
>          |      ^~~~~~~~~~~~~~~~~~~~~~~~~
>
>
> vim +/arch_tlbbatch_add_pending +685 mm/rmap.c
>
>    663 
>    664  /*
>    665   * Bits 0-14 of mm->tlb_flush_batched record pending generations.
>    666   * Bits 16-30 of mm->tlb_flush_batched bit record flushed generations.
>    667   */
>    668  #define TLB_FLUSH_BATCH_FLUSHED_SHIFT   16
>    669  #define TLB_FLUSH_BATCH_PENDING_MASK                    \
>    670          ((1 << (TLB_FLUSH_BATCH_FLUSHED_SHIFT - 1)) - 1)
>    671  #define TLB_FLUSH_BATCH_PENDING_LARGE                   \
>    672          (TLB_FLUSH_BATCH_PENDING_MASK / 2)
>    673 
>    674  static void set_tlb_ubc_flush_pending(struct mm_struct *mm, pte_t pteval,
>    675                                        unsigned long uaddr,
>    676                                        unsigned long size)
>    677  {
>    678          struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
>    679          int batch;
>    680          bool writable = pte_dirty(pteval);
>    681 
>    682          if (!pte_accessible(mm, pteval))
>    683                  return;
>    684 
>  > 685          arch_tlbbatch_add_pending(&tlb_ubc->arch, mm, uaddr, size);
>    686          tlb_ubc->flush_required = true;
>    687 
>    688          /*
>    689           * Ensure compiler does not re-order the setting of tlb_flush_batched
>    690           * before the PTE is cleared.
>    691           */
>    692          barrier();
>    693          batch = atomic_read(&mm->tlb_flush_batched);
>    694  retry:
>    695          if ((batch & TLB_FLUSH_BATCH_PENDING_MASK) > TLB_FLUSH_BATCH_PENDING_LARGE) {
>    696                  /*
>    697                   * Prevent `pending' from catching up with `flushed' because of
>    698                   * overflow.  Reset `pending' and `flushed' to be 1 and 0 if
>    699                   * `pending' becomes large.
>    700                   */
>    701                  if (!atomic_try_cmpxchg(&mm->tlb_flush_batched, &batch, 1))
>    702                          goto retry;
>    703          } else {
>    704                  atomic_inc(&mm->tlb_flush_batched);
>    705          }
>    706 
>    707          /*
>    708           * If the PTE was dirty then it's best to assume it's writable. The
>    709           * caller must use try_to_unmap_flush_dirty() or try_to_unmap_flush()
>    710           * before the page is queued for IO.
>    711           */
>    712          if (writable)
>    713                  tlb_ubc->writable = true;
>    714  }
>    715 
>
> --
> 0-DAY CI Kernel Test Service
> https://github.com/intel/lkp-tests/wiki

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ