Message-ID: <20251003111032.00004688@huawei.com>
Date: Fri, 3 Oct 2025 11:10:32 +0100
From: Jonathan Cameron <jonathan.cameron@...wei.com>
To: Raghavendra K T <raghavendra.kt@....com>
CC: <AneeshKumar.KizhakeVeetil@....com>, <Michael.Day@....com>,
	<akpm@...ux-foundation.org>, <bharata@....com>, <dave.hansen@...el.com>,
	<david@...hat.com>, <dongjoo.linux.dev@...il.com>, <feng.tang@...el.com>,
	<gourry@...rry.net>, <hannes@...xchg.org>, <honggyu.kim@...com>,
	<hughd@...gle.com>, <jhubbard@...dia.com>, <jon.grimm@....com>,
	<k.shutemov@...il.com>, <kbusch@...a.com>, <kmanaouil.dev@...il.com>,
	<leesuyeon0506@...il.com>, <leillc@...gle.com>, <liam.howlett@...cle.com>,
	<linux-kernel@...r.kernel.org>, <linux-mm@...ck.org>,
	<mgorman@...hsingularity.net>, <mingo@...hat.com>, <nadav.amit@...il.com>,
	<nphamcs@...il.com>, <peterz@...radead.org>, <riel@...riel.com>,
	<rientjes@...gle.com>, <rppt@...nel.org>, <santosh.shukla@....com>,
	<shivankg@....com>, <shy828301@...il.com>, <sj@...nel.org>, <vbabka@...e.cz>,
	<weixugc@...gle.com>, <willy@...radead.org>, <ying.huang@...ux.alibaba.com>,
	<ziy@...dia.com>, <dave@...olabs.net>, <yuanchu@...gle.com>,
	<kinseyho@...gle.com>, <hdanton@...a.com>, <harry.yoo@...cle.com>
Subject: Re: [RFC PATCH V3 11/17] mm/kscand: Implement migration failure
 feedback

On Thu, 14 Aug 2025 15:33:01 +0000
Raghavendra K T <raghavendra.kt@....com> wrote:

>  Before this change, the scanning kthread continues to scan even after
> migration fails. To control the migration rate, scanning is slowed down
> based on the failure/success ratio obtained from the migration thread.
> 
>  A decaying failure ratio is maintained over a 1024-migration window.
> The ratio in turn contributes approximately 10% scaling to scan_period.
Perhaps it's worth adding a cover letter section describing all the
heuristics briefly so we have a central place to understand what
needs tuning against workloads before this merges?
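
For example, my reading of the scan_period side (the consumer isn't in
this hunk, so this is only a guess at the shape of it) is something
like:

	/*
	 * Illustration only, not taken from the patch: back off
	 * scanning in proportion to the failure ratio, topping out
	 * at ~10% extra scan_period when every migration in the
	 * window failed (fratio == 100).
	 */
	fratio = kmigrated_get_mstat_fratio(mm);
	scan_period += scan_period * fratio / 1000;

If that's roughly right, having the constants documented in one place
would make the tuning story much easier to follow.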

J

> 
> Signed-off-by: Raghavendra K T <raghavendra.kt@....com>
> ---
>  mm/kscand.c | 55 +++++++++++++++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 55 insertions(+)
> 
> diff --git a/mm/kscand.c b/mm/kscand.c
> index bf975e82357d..41321d373be7 100644
> --- a/mm/kscand.c
> +++ b/mm/kscand.c
> @@ -146,6 +146,8 @@ struct kmigrated_mm_slot {
>  	spinlock_t migrate_lock;
>  	/* Head of per mm migration list */
>  	struct list_head migrate_head;
> +	/* Indicates weighted success, failure */
> +	int msuccess, mfailed, fratio;
>  };
>  
>  /* System wide list of mms that maintain migration list */
> @@ -812,13 +814,45 @@ static void kscand_collect_mm_slot(struct kscand_mm_slot *mm_slot)
>  	}
>  }
>  
> +static int kmigrated_get_mstat_fratio(struct mm_struct *mm)
> +{
> +	int fratio = 0;
> +	struct kmigrated_mm_slot *mm_slot = NULL;
> +	struct mm_slot *slot;
> +
> +	guard(spinlock)(&kscand_migrate_lock);
> +
> +	slot = mm_slot_lookup(kmigrated_slots_hash, mm);
> +	mm_slot = mm_slot_entry(slot, struct kmigrated_mm_slot, mm_slot);
> +
> +	if (mm_slot)
> +		fratio =  mm_slot->fratio;

Extra space after =

> +
> +	return fratio;
> +}
> +
> +static void update_mstat_ratio(struct kmigrated_mm_slot *mm_slot,
> +				int msuccess, int mfailed)
> +{
> +	mm_slot->msuccess = (mm_slot->msuccess >> 2) + msuccess;
> +	mm_slot->mfailed = (mm_slot->mfailed >> 2) + mfailed;
> +	mm_slot->fratio = mm_slot->mfailed * 100;
> +	mm_slot->fratio /=  (mm_slot->msuccess + mm_slot->mfailed);

extra space after =
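
Also, to check my understanding of the decay: the >>2 keeps a quarter
of the previous value, so at steady state the weighted counts settle at
roughly 4/3 of the per-window counts and fratio tracks the recent
failure percentage. A quick userspace sketch (mine, purely to
illustrate the arithmetic above):

	#include <stdio.h>

	int main(void)
	{
		int msuccess = 0, mfailed = 0;

		/* Five windows at 25% failures, then five at 75%. */
		for (int w = 0; w < 10; w++) {
			int fail = (w < 5) ? 256 : 768;	/* of 1024 */
			int succ = 1024 - fail;

			/* Same update rule as update_mstat_ratio(). */
			msuccess = (msuccess >> 2) + succ;
			mfailed = (mfailed >> 2) + fail;
			printf("window %d: fratio = %d%%\n", w,
			       mfailed * 100 / (msuccess + mfailed));
		}
		return 0;
	}

fratio converges within two or three windows of the failure rate
changing, which seems a reasonable reaction time.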

> +}
> +
> +#define MSTAT_UPDATE_FREQ	1024
> +
>  static void kmigrated_migrate_mm(struct kmigrated_mm_slot *mm_slot)
>  {
> +	int mfailed = 0;
> +	int msuccess = 0;
> +	int mstat_counter;
>  	int ret = 0, dest = -1;
>  	struct mm_slot *slot;
>  	struct mm_struct *mm;
>  	struct kscand_migrate_info *info, *tmp;
>  
> +	mstat_counter = MSTAT_UPDATE_FREQ;

Might as well set this at the declaration above, i.e.
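
	int mstat_counter = MSTAT_UPDATE_FREQ;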

>  	spin_lock(&mm_slot->migrate_lock);
>  
>  	slot = &mm_slot->mm_slot;
> @@ -842,11 +876,23 @@ static void kmigrated_migrate_mm(struct kmigrated_mm_slot *mm_slot)
>  			}
>  
>  			ret = kmigrated_promote_folio(info, mm, dest);
> +			mstat_counter--;
> +
> +			/* TBD: encode migrated count here, currently assume folio_nr_pages */
> +			if (!ret)
> +				msuccess++;
> +			else
> +				mfailed++;
>  
>  			kfree(info);
>  
>  			cond_resched();
>  			spin_lock(&mm_slot->migrate_lock);
> +			if (!mstat_counter) {
> +				update_mstat_ratio(mm_slot, msuccess, mfailed);
> +				msuccess  = mfailed = 0;

extra space before =

> +				mstat_counter = MSTAT_UPDATE_FREQ;
> +			}
>  		}
>  	}
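
One more thing I noticed whilst reading: if the per-mm list empties
partway through a window, the local msuccess/mfailed counts are simply
dropped. Probably harmless given the window size, but perhaps worth
folding the partial window in on the way out? Untested sketch:

	/* Hypothetical: flush a partial window before returning. */
	if (mstat_counter != MSTAT_UPDATE_FREQ)
		update_mstat_ratio(mm_slot, msuccess, mfailed);

(under migrate_lock, before the final unlock). Or just a comment saying
the loss is deliberate.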
